Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | stats |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
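A minimal sketch of how rows with this schema could be loaded and filtered, assuming the dump has been exported to a columnar file (the file name here is hypothetical; any export with the columns above works):

import pandas as pd

# Hypothetical export of rows like the ones below.
df = pd.read_parquet("stack_python_sample.parquet")
# Keep "code-like" files: mostly alphanumeric, no pathological line lengths.
mask = (df["alphanum_fraction"] > 0.5) & (df["max_line_length"] < 1000)
for _, row in df.loc[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])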
hexsha: 37675e31c38367e5cd174cb18ac480f67fa67fbf | size: 3,686 | ext: py | lang: Python
max_stars: model.py | samsonmking/BehavioralCloning @ 871802f31fb3e06135a1050e639941ab91112b20 | ["MIT"] | count: null
max_issues: model.py | samsonmking/BehavioralCloning @ 871802f31fb3e06135a1050e639941ab91112b20 | ["MIT"] | count: null
max_forks: model.py | samsonmking/BehavioralCloning @ 871802f31fb3e06135a1050e639941ab91112b20 | ["MIT"] | count: null
import csv
import cv2
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.core import Dense, Dropout, Flatten, Lambda
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
def read_data_from_csv():
samples = []
with open('./data/driving_log.csv') as csv_file:
reader = csv.reader(csv_file)
samples = [line for line in reader]
return samples
def generate_data(input_samples, batch_size):
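    # Each CSV row yields four training samples: the centre image with its
    # recorded steering angle, the left/right camera images with the angle
    # shifted by +/- bias (a steering correction toward the lane centre),
    # and a horizontally flipped centre image with the angle negated.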
bias = 0.25
input_samples = shuffle(input_samples)
num_samples = len(input_samples)
while True:
for offset in range(0, num_samples, batch_size):
batch_samples = input_samples[offset:offset+batch_size]
images = []
steering_angles = []
for batch_sample in batch_samples:
center_img = cv2.imread(batch_sample[0])
left_img = cv2.imread(batch_sample[1])
right_img = cv2.imread(batch_sample[2])
center_image_flipped = cv2.flip(center_img, 1)
steering_angle = float(batch_sample[3])
images.append(center_img)
steering_angles.append(steering_angle)
images.append(left_img)
steering_angles.append(steering_angle + bias)
images.append(right_img)
steering_angles.append(steering_angle - bias)
images.append(center_image_flipped)
steering_angles.append(-1.0 * steering_angle)
X = np.array(images)
y = np.array(steering_angles)
yield shuffle(X, y)
batch_size = 128
dropout_rate = 0.30
samples = read_data_from_csv()
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
generate_train_data = generate_data(train_samples, batch_size)
generate_validation_data = generate_data(validation_samples, batch_size)
# Implement the model based on NVIDIA's 'End to End Learning for Self-Driving Cars'
model = Sequential()
# Normalize input
model.add(Lambda(lambda img: (img / 255.0) - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
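# The crop drops the top 70 rows (mostly sky/horizon) and the bottom 25 rows
# (presumably the car hood), leaving only the road region as input.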
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(dropout_rate))
model.add(Dense(50))
model.add(Dropout(dropout_rate))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
# Stop training if val_loss has not improved after 3 epochs
early_stop_callback = EarlyStopping(
monitor='val_loss', patience=3)
# Save the best performing version of the model to model.h5
model_checkpoint_callback = ModelCheckpoint(
filepath='model.h5',
monitor='val_loss',
mode='min',
save_best_only=True,
verbose=1)
# Train
model.fit_generator(
generate_train_data,
steps_per_epoch=np.ceil(len(train_samples)/batch_size),
validation_data=generate_validation_data,
validation_steps=np.ceil(
len(validation_samples)/batch_size),
callbacks=[early_stop_callback, model_checkpoint_callback],
epochs=100, verbose=1)
avg_line_length: 34.12963 | max_line_length: 84 | alphanum_fraction: 0.704286

hexsha: 90355f637c1c936cd213e8a2807257b8ff1229bd | size: 1,495 | ext: py | lang: Python
max_stars: src/gafaelfawr/handlers/logout.py | slaclab/gafaelfawr @ 7a64b0f159003d4745531c89d5b0f7d9777f7bce | ["MIT"] | count: null
max_issues: src/gafaelfawr/handlers/logout.py | slaclab/gafaelfawr @ 7a64b0f159003d4745531c89d5b0f7d9777f7bce | ["MIT"] | count: null
max_forks: src/gafaelfawr/handlers/logout.py | slaclab/gafaelfawr @ 7a64b0f159003d4745531c89d5b0f7d9777f7bce | ["MIT"] | count: null
"""Log out handler (``/logout``)."""
from __future__ import annotations
from typing import Optional
from fastapi import APIRouter, Depends, status
from fastapi.responses import RedirectResponse
from ..dependencies.context import RequestContext, context_dependency
from ..dependencies.return_url import return_url
from ..models.state import State
router = APIRouter()
__all__ = ["get_logout"]
@router.get(
"/logout",
responses={307: {"description": "Redirect to landing page"}},
status_code=status.HTTP_307_TEMPORARY_REDIRECT,
summary="Log out",
tags=["browser"],
)
async def get_logout(
return_url: Optional[str] = Depends(return_url),
context: RequestContext = Depends(context_dependency),
) -> RedirectResponse:
"""Log out and redirect the user.
The user is redirected to the URL given in the rd parameter, if any, and
otherwise to the after_logout_url configuration setting.
If the user was logged in via GitHub (and Gafaelfawr is still configured
to use GitHub), the GitHub OAuth authorization grant is also revoked.
"""
if context.state.token:
auth_provider = context.factory.create_provider()
await auth_provider.logout(context.state)
context.logger.info("Successful logout")
else:
context.logger.info("Logout of already-logged-out session")
context.state = State()
if not return_url:
return_url = context.config.after_logout_url
return RedirectResponse(return_url)
avg_line_length: 30.510204 | max_line_length: 76 | alphanum_fraction: 0.729766

hexsha: 8ea11d382b357a5f3839336f76b007bba8878107 | size: 42 | ext: py | lang: Python
max_stars: react/__init__.py | Stift007/react.py @ e0640cba48debdd745f9ee55fdc073c1927641f2 | ["MIT"] | count: null
max_issues: react/__init__.py | Stift007/react.py @ e0640cba48debdd745f9ee55fdc073c1927641f2 | ["MIT"] | count: null
max_forks: react/__init__.py | Stift007/react.py @ e0640cba48debdd745f9ee55fdc073c1927641f2 | ["MIT"] | count: null
from .app import *
from .globals import *
avg_line_length: 14 | max_line_length: 22 | alphanum_fraction: 0.714286

hexsha: 5bf1a3737aac5b35994ee53b90ae1638e35f630f | size: 137 | ext: py | lang: Python
max_stars: tools/vidTest.py | rtgoring/py-faster-rcnn-thesis @ 62d330268147d212fd9661a4ba995b45cd404b6c | ["BSD-2-Clause"] | count: null
max_issues: tools/vidTest.py | rtgoring/py-faster-rcnn-thesis @ 62d330268147d212fd9661a4ba995b45cd404b6c | ["BSD-2-Clause"] | count: null
max_forks: tools/vidTest.py | rtgoring/py-faster-rcnn-thesis @ 62d330268147d212fd9661a4ba995b45cd404b6c | ["BSD-2-Clause"] | count: 1 | events: 2019-08-24T05:21:13.000Z – 2019-08-24T05:21:13.000Z
import cv2
cap = cv2.VideoCapture('test.avi')
while True:
    ret, im = cap.read()
    if not ret:  # end of stream or failed read: cap.read() returns (False, None)
        break
    cv2.imshow('frame', im)
    cv2.waitKey(20)
cap.release()
cv2.destroyAllWindows()
avg_line_length: 13.7 | max_line_length: 34 | alphanum_fraction: 0.605839

hexsha: 59a4ff9689662103cabbb1b07e9ef0f4b2a1367a | size: 733 | ext: py | lang: Python
max_stars: movie_planet/movies/utils.py | d-wysocki/MoviePlanet @ 389fb24189d7ac98b80617deb9a727c9b6bb4dd4 | ["MIT"] | count: null
max_issues: movie_planet/movies/utils.py | d-wysocki/MoviePlanet @ 389fb24189d7ac98b80617deb9a727c9b6bb4dd4 | ["MIT"] | count: null
max_forks: movie_planet/movies/utils.py | d-wysocki/MoviePlanet @ 389fb24189d7ac98b80617deb9a727c9b6bb4dd4 | ["MIT"] | count: null
from collections import Counter
from datetime import datetime, timedelta
def generate_movie_rank(items):
    counter = Counter(items).most_common()
    result = []
    rank = 0
    prev_total = None
    for movie_id, total_comments in counter:
        # Movies tied on total_comments share a rank (dense ranking).
        if total_comments != prev_total:
            rank += 1
            prev_total = total_comments
        result.append(
            {"movie_id": movie_id, "total_comments": total_comments, "rank": rank}
        )
    return result
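# Example (dense ranking): generate_movie_rank(["a", "a", "b", "b", "c"])
# ranks "a" and "b" both 1 (two comments each) and "c" 2.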
def prepare_date_range(from_date, to_date):
from_date = datetime.strptime(from_date, "%Y-%m-%d").date()
to_date = datetime.strptime(to_date, "%Y-%m-%d") + timedelta(
hours=23, minutes=59, seconds=59
)
return [from_date, to_date]
avg_line_length: 26.178571 | max_line_length: 82 | alphanum_fraction: 0.624829

hexsha: 85090d89bd87ac642e4e4e8fefa3ef7f0c67de84 | size: 3,578 | ext: py | lang: Python
max_stars: tests/processing_components/test_primary_beam_illumination.py | SKA-ScienceDataProcessor/rascil @ bd3b47f779e18e184781e2928ad1539d1fdc1c9b | ["Apache-2.0"] | count: 7 | events: 2019-12-14T13:42:33.000Z – 2022-01-28T03:31:45.000Z
max_issues: tests/processing_components/test_primary_beam_illumination.py | SKA-ScienceDataProcessor/rascil @ bd3b47f779e18e184781e2928ad1539d1fdc1c9b | ["Apache-2.0"] | count: 6 | events: 2020-01-08T09:40:08.000Z – 2020-06-11T14:56:13.000Z
max_forks: tests/processing_components/test_primary_beam_illumination.py | SKA-ScienceDataProcessor/rascil @ bd3b47f779e18e184781e2928ad1539d1fdc1c9b | ["Apache-2.0"] | count: 3 | events: 2020-01-14T11:14:16.000Z – 2020-09-15T05:21:06.000Z
"""Unit tests for testing support
"""
import os
import logging
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.image.operations import export_image_to_fits, show_image
from rascil.processing_components.imaging.base import create_image_from_visibility
from rascil.processing_components.imaging.primary_beams import create_pb, create_vp, create_vp_generic_numeric
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components.visibility.base import create_visibility
log = logging.getLogger('logger')
log.setLevel(logging.WARNING)
class TestPrimaryBeams(unittest.TestCase):
def setUp(self):
from rascil.data_models.parameters import rascil_path, rascil_data_path
self.dir = rascil_path('test_results')
self.persist = os.getenv("RASCIL_PERSIST", False)
self.plot = False
def createVis(self, config='MID', dec=-35.0, rmax=1e3, freq=1e9):
self.frequency = numpy.linspace(freq, 1.5 * freq, 3)
self.channel_bandwidth = numpy.array([2.5e7, 2.5e7, 2.5e7])
self.flux = numpy.array([[100.0], [100.0], [100.0]])
self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
self.config = create_named_configuration(config)
self.times = numpy.linspace(-300.0, 300.0, 3) * numpy.pi / 43200.0
nants = self.config.xyz.shape[0]
assert nants > 1
assert len(self.config.names) == nants
assert len(self.config.mount) == nants
self.config = create_named_configuration(config, rmax=rmax)
self.phasecentre = SkyCoord(ra=+15 * u.deg, dec=dec * u.deg, frame='icrs', equinox='J2000')
self.vis = create_visibility(self.config, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'))
def test_create_voltage_patterns_illumination(self):
self.createVis(freq=1.4e9)
cellsize = 8 * numpy.pi / 180.0 / 280
model = create_image_from_visibility(self.vis, npixel=512, cellsize=cellsize, override_cellsize=False)
if self.plot:
plt.clf()
fig, axs = plt.subplots(5, 5, gridspec_kw={'hspace': 0, 'wspace': 0})
# (r ** 2 + rho * (dx * dy) + diff * (dx ** 2 - dy ** 2))
for irho, rho in enumerate([-0.1, -0.05, 0.0, 0.05, 0.1]):
for idiff, diff in enumerate([-0.2, -0.15, -0.1, -0.05, 0.0]):
vp = create_vp_generic_numeric(model, pointingcentre=None, diameter=15.0, blockage=0.0, taper='gaussian',
edge=0.03162278, padding=2, use_local=True, rho=rho, diff=diff)
vp_data = vp.data
vp.data = numpy.real(vp_data)
                if self.persist:
                    export_image_to_fits(
                        vp, "%s/test_voltage_pattern_real_%s_rho%.3f_diff%.3f.fits" %
                        (self.dir, "MID_TAPER", rho, diff))
                if self.plot:
                    ax = axs[irho, idiff]
                    ax.imshow(vp.data[0, 0])  # optionally: vmax=0.1, vmin=-0.01
                    ax.axis('off')
        if self.plot:
            plt.show(block=False)
if __name__ == '__main__':
unittest.main()
avg_line_length: 44.725 | max_line_length: 121 | alphanum_fraction: 0.629961

hexsha: ea8c8a848370363243044d9ed0781e1f2b4f00e0 | size: 1,086 | ext: py | lang: Python
max_stars: tests/test_crud/test_weapon_crud.py | OrderAndCh4oS/drone_squadron_api_prototype @ 4d7c22cebb03576986d443634b17910cb460a60f | ["MIT"] | count: 1 | events: 2020-05-20T09:44:37.000Z – 2020-05-20T09:44:37.000Z
max_issues: tests/test_crud/test_weapon_crud.py | sarcoma/drone_squadron_api_prototype @ 4d7c22cebb03576986d443634b17910cb460a60f | ["MIT"] | count: 1 | events: 2021-06-01T22:30:10.000Z – 2021-06-01T22:30:10.000Z
max_forks: tests/test_crud/test_weapon_crud.py | OrderAndCh4oS/drone_squadron_api_prototype @ 4d7c22cebb03576986d443634b17910cb460a60f | ["MIT"] | count: null
from sqlalchemy.engine import ResultProxy
from drone_squadron.crud.weapon_crud import WeaponCrud
from drone_squadron.schema import weapon
class TestWeaponCrud:
crud = WeaponCrud
def test_insert(self, setup):
with self.crud() as crud:
result = crud.insert(name="Rifle", fire_rate=5) # type: ResultProxy
assert 1 == result.inserted_primary_key[0]
def test_select(self, setup):
with self.crud() as crud:
result = crud.select() # type: ResultProxy
rows = result.fetchall()
assert "Rifle" == rows[0][weapon.c.name]
assert 5 == rows[0][weapon.c.fire_rate]
def test_update(self, setup):
with self.crud() as crud:
result = crud.update(item_id=1, name="AWP", fire_rate=10) # type: ResultProxy
assert {'id_1': 1, "name": "AWP", "fire_rate": 10} == result.last_updated_params()
def test_delete(self, setup):
with self.crud() as crud:
result = crud.delete(item_id=1) # type: ResultProxy
assert 1 == result.rowcount
avg_line_length: 35.032258 | max_line_length: 94 | alphanum_fraction: 0.624309

hexsha: 7666474a3407f4725b46d13ff87602069a02acba | size: 605 | ext: py | lang: Python
max_stars: chartify/_core/__init__.py | rohankumardubey/chartify @ 5ac3a88e54cf620389741f396cc19d60fe032822 | ["Apache-2.0"] | count: 3,111 | events: 2018-09-18T01:59:56.000Z – 2022-03-29T14:45:00.000Z
max_issues: chartify/_core/__init__.py | rohankumardubey/chartify @ 5ac3a88e54cf620389741f396cc19d60fe032822 | ["Apache-2.0"] | count: 97 | events: 2018-09-21T19:53:19.000Z – 2022-03-03T04:48:54.000Z
max_forks: chartify/_core/__init__.py | rohankumardubey/chartify @ 5ac3a88e54cf620389741f396cc19d60fe032822 | ["Apache-2.0"] | count: 316 | events: 2018-10-06T05:39:39.000Z – 2022-03-21T08:38:00.000Z
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
avg_line_length: 37.8125 | max_line_length: 74 | alphanum_fraction: 0.74876

hexsha: 9348d86e734a824f9e4153e6af1bf853f83d26f0 | size: 440 | ext: py | lang: Python
max_stars: p23.py | cemulate/python-challenge @ 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | ["MIT"] | count: null
max_issues: p23.py | cemulate/python-challenge @ 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | ["MIT"] | count: null
max_forks: p23.py | cemulate/python-challenge @ 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | ["MIT"] | count: null
import this
import codecs
print("\nRot13 message:")
string = 'va gur snpr bs jung?'
print(string, "\n")
string = codecs.encode(string, "rot13")  # rot13 via the text-to-text codec
print(string)
print("\nambiguity")
print("Apologizing to Leopold...")
#import smtplib
#server = smtplib.SMTP('smtp.gmail.com:587')
#server.starttls()
#server.login("[email protected]", raw_input("Enter password: "))
#server.sendmail("[email protected]", ["[email protected]"], "sorry")
#server.quit()
avg_line_length: 23.157895 | max_line_length: 85 | alphanum_fraction: 0.713636

hexsha: e09e409da08b385d66c55ada60e095214621998b | size: 40,337 | ext: py | lang: Python
max_stars: openstack_dashboard/api/cinder.py | lostmap/horizon @ 584aa54985e6d0945f8ea08195530cb67e7ff72a | ["Apache-2.0"] | count: null
max_issues: openstack_dashboard/api/cinder.py | lostmap/horizon @ 584aa54985e6d0945f8ea08195530cb67e7ff72a | ["Apache-2.0"] | count: null
max_forks: openstack_dashboard/api/cinder.py | lostmap/horizon @ 584aa54985e6d0945f8ea08195530cb67e7ff72a | ["Apache-2.0"] | count: 1 | events: 2020-04-21T22:12:55.000Z – 2020-04-21T22:12:55.000Z
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
from django.conf import settings
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from cinderclient import api_versions
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized
from openstack_dashboard.api import _nova
from openstack_dashboard.api import base
from openstack_dashboard.api import microversions
from openstack_dashboard.contrib.developer.profiler import api as profiler
from openstack_dashboard.utils import settings as utils
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
# Available consumer choices associated with QOS Specs
CONSUMER_CHOICES = (
('back-end', _('back-end')),
('front-end', _('front-end')),
('both', pgettext_lazy('Both of front-end and back-end', u'both')),
)
VERSIONS = base.APIVersionManager("volume", preferred_version='3')
try:
# pylint: disable=ungrouped-imports
from cinderclient.v2 import client as cinder_client_v2
VERSIONS.load_supported_version('2', {"client": cinder_client_v2,
"version": '2'})
from cinderclient.v3 import client as cinder_client_v3
VERSIONS.load_supported_version('3', {"client": cinder_client_v3,
"version": '3'})
except ImportError:
pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
@property
def name(self):
# If a volume doesn't have a name, use its id.
return (getattr(self._apiresource, 'name', None) or
getattr(self._apiresource, 'id', None))
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_description', None))
class Volume(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
'volume_type', 'availability_zone', 'imageRef', 'bootable',
'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
'group_id', 'consistencygroup_id', 'os-vol-host-attr:host',
'os-vol-tenant-attr:tenant_id', 'metadata',
'volume_image_metadata', 'encrypted', 'transfer',
'multiattach']
@property
def is_bootable(self):
return self.bootable == 'true'
@property
def tenant_id(self):
return getattr(self, 'os-vol-tenant-attr:tenant_id', "")
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status',
'created_at', 'volume_id', 'group_snapshot_id',
'os-extended-snapshot-attributes:project_id',
'metadata']
@property
def project_id(self):
return getattr(self, 'os-extended-snapshot-attributes:project_id', "")
class VolumeType(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'extra_specs', 'created_at', 'encryption',
'associated_qos_spec', 'description',
'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'container', 'size', 'status',
'created_at', 'volume_id', 'availability_zone', 'snapshot_id']
_volume = None
_snapshot = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
@property
def snapshot(self):
return self._snapshot
@snapshot.setter
def snapshot(self, value):
self._snapshot = value
class QosSpecs(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'consumer', 'specs']
class VolTypeExtraSpec(object):
def __init__(self, type_id, key, val):
self.type_id = type_id
self.id = key
self.key = key
self.value = val
class GroupTypeSpec(object):
def __init__(self, group_type_id, key, val):
self.group_type_id = group_type_id
self.id = key
self.key = key
self.value = val
class QosSpec(object):
def __init__(self, id, key, val):
self.id = id
self.key = key
self.value = val
class VolumeTransfer(base.APIResourceWrapper):
_attrs = ['id', 'name', 'created_at', 'volume_id', 'auth_key']
class VolumePool(base.APIResourceWrapper):
_attrs = ['name', 'pool_name', 'total_capacity_gb', 'free_capacity_gb',
'allocated_capacity_gb', 'QoS_support', 'reserved_percentage',
'volume_backend_name', 'vendor_name', 'driver_version',
'storage_protocol', 'extra_specs']
class Group(base.APIResourceWrapper):
_attrs = ['id', 'status', 'availability_zone', 'created_at', 'name',
'description', 'group_type', 'volume_types',
'group_snapshot_id', 'source_group_id', 'replication_status',
'project_id']
class GroupSnapshot(base.APIResourceWrapper):
_attrs = ['id', 'name', 'description', 'status', 'created_at',
'group_id', 'group_type_id', 'project_id']
class GroupType(base.APIResourceWrapper):
_attrs = ['id', 'name', 'description', 'is_public', 'group_specs']
def _find_cinder_url(request, version=None):
if version is None:
api_version = VERSIONS.get_active_version()
version = api_version['version']
version = base.Version(version)
# We support only cinder v2 and v3.
if version.major == 3:
candidates = ['volumev3', 'volume']
else:
candidates = ['volumev2', 'volume']
for service_name in candidates:
try:
return version, base.url_for(request, service_name)
except exceptions.ServiceCatalogException:
pass
else:
raise exceptions.ServiceCatalogException(
("Cinder %(version)s requested but no '%(service)s' service "
"type available in Keystone catalog.") %
{'version': version, 'service': candidates})
@memoized
def cinderclient(request, version=None):
version, cinder_url = _find_cinder_url(request, version)
insecure = settings.OPENSTACK_SSL_NO_VERIFY
cacert = settings.OPENSTACK_SSL_CACERT
c = cinder_client.Client(
version,
request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=base.url_for(request, 'identity'),
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG,
)
c.client.auth_token = request.user.token.id
c.client.management_url = cinder_url
return c
def get_microversion(request, features):
try:
version, cinder_url = _find_cinder_url(request)
except exceptions.ServiceCatalogException:
return None
insecure = settings.OPENSTACK_SSL_NO_VERIFY
cacert = settings.OPENSTACK_SSL_CACERT
min_ver, max_ver = cinder_client.get_server_version(cinder_url,
insecure, cacert)
return microversions.get_microversion_for_features(
'cinder', features, api_versions.APIVersion, min_ver, max_ver)
def _cinderclient_with_features(request, features,
raise_exc=False, message=False):
version = get_microversion(request, features)
if version is None:
if message:
versions = microversions.get_requested_versions('cinder', features)
if message is True:
message = ('Insufficient microversion for cinder feature(s) '
'%(features)s. One of the following API '
                           'microversion(s) is required: %(versions)s.')
LOG.warning(message,
{'features': features, 'versions': versions})
if raise_exc:
raise microversions.MicroVersionNotFound(features)
if version is not None:
version = version.get_string()
return cinderclient(request, version=version)
def _cinderclient_with_generic_groups(request):
return _cinderclient_with_features(request, 'groups')
def version_get():
api_version = VERSIONS.get_active_version()
return api_version['version']
def volume_list(request, search_opts=None, marker=None, sort_dir="desc"):
volumes, _, __ = volume_list_paged(
request, search_opts=search_opts, marker=marker, paginate=False,
sort_dir=sort_dir)
return volumes
def update_pagination(entities, page_size, marker, sort_dir):
has_more_data, has_prev_data = False, False
if len(entities) > page_size:
has_more_data = True
entities.pop()
if marker is not None:
has_prev_data = True
# first page condition when reached via prev back
elif sort_dir == 'asc' and marker is not None:
has_more_data = True
# last page condition
elif marker is not None:
has_prev_data = True
if sort_dir == 'asc':
entities.reverse()
return entities, has_more_data, has_prev_data
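# Example: with page_size=20 the callers below request limit=21; if 21 rows
# come back, update_pagination() pops the extra sentinel row and reports
# has_more_data=True, and any non-None marker implies has_prev_data=True.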
@profiler.trace
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
sort_dir="desc"):
"""List volumes with pagination.
To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
has_more_data = False
has_prev_data = False
volumes = []
# To support filtering with group_id, we need to use the microversion.
c_client = _cinderclient_with_generic_groups(request)
if c_client is None:
return volumes, has_more_data, has_prev_data
# build a dictionary of volume_id -> transfer
transfers = {t.volume_id: t
for t in transfer_list(request, search_opts=search_opts)}
if paginate:
page_size = utils.get_page_size(request)
# sort_key and sort_dir deprecated in kilo, use sort
# if pagination is true, we use a single sort parameter
# by default, it is "created_at"
sort = 'created_at:' + sort_dir
for v in c_client.volumes.list(search_opts=search_opts,
limit=page_size + 1,
marker=marker,
sort=sort):
v.transfer = transfers.get(v.id)
volumes.append(Volume(v))
volumes, has_more_data, has_prev_data = update_pagination(
volumes, page_size, marker, sort_dir)
else:
for v in c_client.volumes.list(search_opts=search_opts):
v.transfer = transfers.get(v.id)
volumes.append(Volume(v))
return volumes, has_more_data, has_prev_data
@profiler.trace
def volume_get(request, volume_id):
client = _cinderclient_with_generic_groups(request)
volume_data = client.volumes.get(volume_id)
for attachment in volume_data.attachments:
if "server_id" in attachment:
instance = _nova.server_get(request, attachment['server_id'])
attachment['instance_name'] = instance.name
else:
# Nova volume can occasionally send back error'd attachments
            # that lack a server_id property; to work around that we'll
# give the attached instance a generic name.
attachment['instance_name'] = _("Unknown instance")
volume_data.transfer = None
if volume_data.status == 'awaiting-transfer':
for transfer in transfer_list(request):
if transfer.volume_id == volume_id:
volume_data.transfer = transfer
break
return Volume(volume_data)
@profiler.trace
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None,
group_id=None):
client = _cinderclient_with_generic_groups(request)
data = {'name': name,
'description': description,
'volume_type': volume_type,
'snapshot_id': snapshot_id,
'metadata': metadata,
'imageRef': image_id,
'availability_zone': availability_zone,
'source_volid': source_volid,
'group_id': group_id}
volume = client.volumes.create(size, **data)
return Volume(volume)
@profiler.trace
def volume_extend(request, volume_id, new_size):
client = _cinderclient_with_features(request,
'extend_in_use_volume')
return client.volumes.extend(volume_id, new_size)
@profiler.trace
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
@profiler.trace
def volume_retype(request, volume_id, new_type, migration_policy):
return cinderclient(request).volumes.retype(volume_id,
new_type,
migration_policy)
@profiler.trace
def volume_set_bootable(request, volume_id, bootable):
return cinderclient(request).volumes.set_bootable(volume_id,
bootable)
@profiler.trace
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
return cinderclient(request).volumes.update(volume_id,
**vol_data)
@profiler.trace
def volume_set_metadata(request, volume_id, metadata):
return cinderclient(request).volumes.set_metadata(volume_id, metadata)
@profiler.trace
def volume_delete_metadata(request, volume_id, keys):
return cinderclient(request).volumes.delete_metadata(volume_id, keys)
@profiler.trace
def volume_reset_state(request, volume_id, state):
cinderclient(request).volumes.reset_state(volume_id, state)
@profiler.trace
def volume_upload_to_image(request, volume_id, force, image_name,
container_format, disk_format):
return cinderclient(request).volumes.upload_to_image(volume_id,
force,
image_name,
container_format,
disk_format)
@profiler.trace
def volume_get_encryption_metadata(request, volume_id):
return cinderclient(request).volumes.get_encryption_metadata(volume_id)
@profiler.trace
def volume_migrate(request, volume_id, host, force_host_copy=False,
lock_volume=False):
return cinderclient(request).volumes.migrate_volume(volume_id,
host,
force_host_copy,
lock_volume)
@profiler.trace
def volume_snapshot_get(request, snapshot_id):
client = _cinderclient_with_generic_groups(request)
snapshot = client.volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
@profiler.trace
def volume_snapshot_list(request, search_opts=None):
snapshots, _, __ = volume_snapshot_list_paged(request,
search_opts=search_opts,
paginate=False)
return snapshots
@profiler.trace
def volume_snapshot_list_paged(request, search_opts=None, marker=None,
paginate=False, sort_dir="desc"):
has_more_data = False
has_prev_data = False
snapshots = []
c_client = _cinderclient_with_generic_groups(request)
if c_client is None:
return snapshots, has_more_data, has_more_data
if paginate:
page_size = utils.get_page_size(request)
# sort_key and sort_dir deprecated in kilo, use sort
# if pagination is true, we use a single sort parameter
# by default, it is "created_at"
sort = 'created_at:' + sort_dir
for s in c_client.volume_snapshots.list(search_opts=search_opts,
limit=page_size + 1,
marker=marker,
sort=sort):
snapshots.append(VolumeSnapshot(s))
snapshots, has_more_data, has_prev_data = update_pagination(
snapshots, page_size, marker, sort_dir)
else:
for s in c_client.volume_snapshots.list(search_opts=search_opts):
snapshots.append(VolumeSnapshot(s))
return snapshots, has_more_data, has_prev_data
@profiler.trace
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
'description': description,
'force': force}
return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
volume_id, **data))
@profiler.trace
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
@profiler.trace
def volume_snapshot_update(request, snapshot_id, name, description):
snapshot_data = {'name': name,
'description': description}
return cinderclient(request).volume_snapshots.update(snapshot_id,
**snapshot_data)
@profiler.trace
def volume_snapshot_set_metadata(request, snapshot_id, metadata):
return cinderclient(request).volume_snapshots.set_metadata(
snapshot_id, metadata)
@profiler.trace
def volume_snapshot_delete_metadata(request, snapshot_id, keys):
return cinderclient(request).volume_snapshots.delete_metadata(
snapshot_id, keys)
@profiler.trace
def volume_snapshot_reset_state(request, snapshot_id, state):
return cinderclient(request).volume_snapshots.reset_state(
snapshot_id, state)
@memoized
def volume_backup_supported(request):
"""This method will determine if cinder supports backup."""
# TODO(lcheng) Cinder does not expose the information if cinder
# backup is configured yet. This is a workaround until that
# capability is available.
# https://bugs.launchpad.net/cinder/+bug/1334856
return utils.get_dict_config('OPENSTACK_CINDER_FEATURES', 'enable_backup')
@profiler.trace
def volume_backup_get(request, backup_id):
backup = cinderclient(request).backups.get(backup_id)
return VolumeBackup(backup)
def volume_backup_list(request):
backups, _, __ = volume_backup_list_paged(request, paginate=False)
return backups
@profiler.trace
def volume_backup_list_paged_with_page_menu(request, page_number=1,
sort_dir="desc"):
backups = []
count = 0
pages_count = 0
page_size = utils.get_page_size(request)
c_client = cinderclient(request, '3.45')
if c_client is None:
return backups, 0, count, pages_count
offset = (page_number - 1) * page_size
sort = 'created_at:' + sort_dir
bkps, count = c_client.backups.list(limit=page_size,
sort=sort,
search_opts={'with_count': True,
'offset': offset})
if not bkps:
return backups, page_size, count, pages_count
if isinstance(bkps[0], list):
bkps = bkps[0]
pages_count = int(math.ceil(float(count) / float(page_size)))
for b in bkps:
backups.append(VolumeBackup(b))
return backups, page_size, count, pages_count
@profiler.trace
def volume_backup_list_paged(request, marker=None, paginate=False,
sort_dir="desc"):
has_more_data = False
has_prev_data = False
backups = []
c_client = cinderclient(request)
if c_client is None:
return backups, has_more_data, has_prev_data
if paginate:
page_size = utils.get_page_size(request)
# sort_key and sort_dir deprecated in kilo, use sort
# if pagination is true, we use a single sort parameter
# by default, it is "created_at"
sort = 'created_at:' + sort_dir
for b in c_client.backups.list(limit=page_size + 1,
marker=marker,
sort=sort):
backups.append(VolumeBackup(b))
backups, has_more_data, has_prev_data = update_pagination(
backups, page_size, marker, sort_dir)
else:
for b in c_client.backups.list():
backups.append(VolumeBackup(b))
return backups, has_more_data, has_prev_data
@profiler.trace
def volume_backup_create(request,
volume_id,
container_name,
name,
description,
force=False,
snapshot_id=None):
# need to ensure the container name is not an empty
# string, but pass None to get the container name
# generated correctly
backup = cinderclient(request).backups.create(
volume_id,
container=container_name if container_name else None,
name=name,
description=description,
snapshot_id=snapshot_id,
force=force)
return VolumeBackup(backup)
@profiler.trace
def volume_backup_delete(request, backup_id):
return cinderclient(request).backups.delete(backup_id)
@profiler.trace
def volume_backup_restore(request, backup_id, volume_id):
return cinderclient(request).restores.restore(backup_id=backup_id,
volume_id=volume_id)
@profiler.trace
def volume_manage(request,
host,
identifier,
id_type,
name,
description,
volume_type,
availability_zone,
metadata,
bootable):
source = {id_type: identifier}
cinderclient(request).volumes.manage(
host=host,
ref=source,
name=name,
description=description,
volume_type=volume_type,
availability_zone=availability_zone,
metadata=metadata,
bootable=bootable)
@profiler.trace
def volume_unmanage(request, volume_id):
return cinderclient(request).volumes.unmanage(volume=volume_id)
@profiler.trace
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
return base.QuotaSet()
return base.QuotaSet(c_client.quotas.get(tenant_id))
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
@profiler.trace
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list_with_qos_associations(request):
vol_types = volume_type_list(request)
vol_types_dict = {}
# initialize and build a dictionary for lookup access below
for vol_type in vol_types:
vol_type.associated_qos_spec = ""
vol_types_dict[vol_type.id] = vol_type
# get all currently defined qos specs
qos_specs = qos_spec_list(request)
for qos_spec in qos_specs:
# get all volume types this qos spec is associated with
assoc_vol_types = qos_spec_get_associations(request, qos_spec.id)
for assoc_vol_type in assoc_vol_types:
# update volume type to hold this association info
vol_type = vol_types_dict[assoc_vol_type.id]
vol_type.associated_qos_spec = qos_spec.name
return vol_types
def volume_type_get_with_qos_association(request, volume_type_id):
vol_type = volume_type_get(request, volume_type_id)
vol_type.associated_qos_spec = ""
# get all currently defined qos specs
qos_specs = qos_spec_list(request)
for qos_spec in qos_specs:
# get all volume types this qos spec is associated with
assoc_vol_types = qos_spec_get_associations(request, qos_spec.id)
for assoc_vol_type in assoc_vol_types:
if vol_type.id == assoc_vol_type.id:
# update volume type to hold this association info
vol_type.associated_qos_spec = qos_spec.name
return vol_type
return vol_type
@profiler.trace
def default_quota_update(request, **kwargs):
cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
@profiler.trace
def volume_type_list(request):
return cinderclient(request).volume_types.list()
@profiler.trace
def volume_type_create(request, name, description=None, is_public=True):
return cinderclient(request).volume_types.create(name, description,
is_public)
@profiler.trace
def volume_type_update(request, volume_type_id, name=None, description=None,
is_public=None):
return cinderclient(request).volume_types.update(volume_type_id,
name,
description,
is_public)
@profiler.trace
@memoized
def volume_type_default(request):
return cinderclient(request).volume_types.default()
@profiler.trace
def volume_type_delete(request, volume_type_id):
try:
return cinderclient(request).volume_types.delete(volume_type_id)
except cinder_exception.BadRequest:
raise exceptions.BadRequest(_(
"This volume type is used by one or more volumes."))
@profiler.trace
def volume_type_get(request, volume_type_id):
return cinderclient(request).volume_types.get(volume_type_id)
@profiler.trace
def volume_encryption_type_create(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.create(volume_type_id,
specs=data)
@profiler.trace
def volume_encryption_type_delete(request, volume_type_id):
return cinderclient(request).volume_encryption_types.delete(volume_type_id)
@profiler.trace
def volume_encryption_type_get(request, volume_type_id):
return cinderclient(request).volume_encryption_types.get(volume_type_id)
@profiler.trace
def volume_encryption_type_list(request):
return cinderclient(request).volume_encryption_types.list()
@profiler.trace
def volume_encryption_type_update(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.update(volume_type_id,
specs=data)
@profiler.trace
def volume_type_extra_get(request, type_id, raw=False):
vol_type = volume_type_get(request, type_id)
extras = vol_type.get_keys()
if raw:
return extras
return [VolTypeExtraSpec(type_id, key, value) for
key, value in extras.items()]
def volume_type_extra_set(request, type_id, metadata):
vol_type = volume_type_get(request, type_id)
if not metadata:
return None
return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
vol_type = volume_type_get(request, type_id)
return vol_type.unset_keys(keys)
@profiler.trace
def qos_spec_list(request):
return cinderclient(request).qos_specs.list()
@profiler.trace
def qos_spec_get(request, qos_spec_id):
return cinderclient(request).qos_specs.get(qos_spec_id)
@profiler.trace
def qos_spec_delete(request, qos_spec_id):
return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)
@profiler.trace
def qos_spec_create(request, name, specs):
return cinderclient(request).qos_specs.create(name, specs)
def qos_spec_get_keys(request, qos_spec_id, raw=False):
spec = qos_spec_get(request, qos_spec_id)
qos_specs = spec.specs
if raw:
return spec
return [QosSpec(qos_spec_id, key, value) for
key, value in qos_specs.items()]
@profiler.trace
def qos_spec_set_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)
@profiler.trace
def qos_spec_unset_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)
@profiler.trace
def qos_spec_associate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)
@profiler.trace
def qos_spec_disassociate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)
@profiler.trace
def qos_spec_get_associations(request, qos_spec_id):
return cinderclient(request).qos_specs.get_associations(qos_spec_id)
def qos_specs_list(request):
return [QosSpecs(s) for s in qos_spec_list(request)]
@profiler.trace
@memoized
def tenant_absolute_limits(request, tenant_id=None):
_cinderclient = _cinderclient_with_features(
request, ['limits_project_id_query'],
message=('Insufficient microversion for GET limits with '
'project_id query. One of the following API micro '
'version is required: %(versions)s. '
'This causes bug 1810309 on updating quotas.'))
limits = _cinderclient.limits.get(tenant_id=tenant_id).absolute
limits_dict = {}
for limit in limits:
if limit.value < 0:
# In some cases, the absolute limits data in Cinder can get
# out of sync causing the total.*Used limits to return
# negative values instead of 0. For such cases, replace
# negative values with 0.
if limit.name.startswith('total') and limit.name.endswith('Used'):
limits_dict[limit.name] = 0
else:
# -1 is used to represent unlimited quotas
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
@profiler.trace
def service_list(request):
return cinderclient(request).services.list()
@profiler.trace
def availability_zone_list(request, detailed=False):
return cinderclient(request).availability_zones.list(detailed=detailed)
@profiler.trace
@memoized
def list_extensions(request):
cinder_api = cinderclient(request)
return tuple(cinder_list_extensions.ListExtManager(cinder_api).show_all())
@memoized
def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name."""
for extension in list_extensions(request):
if extension.name == extension_name:
return True
return False
@profiler.trace
def transfer_list(request, detailed=True, search_opts=None):
"""List volume transfers.
To see all volumes transfers as an admin pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
try:
return [VolumeTransfer(v) for v in c_client.transfers.list(
detailed=detailed, search_opts=search_opts)]
except cinder_exception.Forbidden as error:
LOG.error(error)
return []
@profiler.trace
def transfer_get(request, transfer_id):
transfer_data = cinderclient(request).transfers.get(transfer_id)
return VolumeTransfer(transfer_data)
@profiler.trace
def transfer_create(request, transfer_id, name):
volume = cinderclient(request).transfers.create(transfer_id, name)
return VolumeTransfer(volume)
@profiler.trace
def transfer_accept(request, transfer_id, auth_key):
return cinderclient(request).transfers.accept(transfer_id, auth_key)
@profiler.trace
def transfer_delete(request, transfer_id):
return cinderclient(request).transfers.delete(transfer_id)
@profiler.trace
def pool_list(request, detailed=False):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumePool(v) for v in c_client.pools.list(
detailed=detailed)]
@profiler.trace
def message_list(request, search_opts=None):
try:
c_client = _cinderclient_with_features(request, ['message_list'],
raise_exc=True, message=True)
except microversions.MicroVersionNotFound:
LOG.warning("Insufficient microversion for message_list")
return []
return c_client.messages.list(search_opts)
def is_volume_service_enabled(request):
return bool(
base.is_service_enabled(request, 'volumev3') or
base.is_service_enabled(request, 'volumev2') or
base.is_service_enabled(request, 'volume')
)
def volume_type_access_list(request, volume_type):
return cinderclient(request).volume_type_access.list(volume_type)
def volume_type_add_project_access(request, volume_type, project_id):
return cinderclient(request).volume_type_access.add_project_access(
volume_type, project_id)
def volume_type_remove_project_access(request, volume_type, project_id):
return cinderclient(request).volume_type_access.remove_project_access(
volume_type, project_id)
@profiler.trace
def group_type_list(request):
client = _cinderclient_with_generic_groups(request)
return [GroupType(t) for t in client.group_types.list()]
@profiler.trace
def group_type_get(request, group_type_id):
client = _cinderclient_with_generic_groups(request)
return GroupType(client.group_types.get(group_type_id))
@profiler.trace
def group_type_create(request, name, description=None, is_public=None):
client = _cinderclient_with_generic_groups(request)
params = {'name': name}
if description is not None:
params['description'] = description
if is_public is not None:
params['is_public'] = is_public
return GroupType(client.group_types.create(**params))
@profiler.trace
def group_type_update(request, group_type_id, name=None, description=None,
is_public=None):
client = _cinderclient_with_generic_groups(request)
return GroupType(client.group_types.update(group_type_id,
name,
description,
is_public))
@profiler.trace
def group_type_delete(request, group_type_id):
client = _cinderclient_with_generic_groups(request)
client.group_types.delete(group_type_id)
@profiler.trace
def group_type_spec_list(request, group_type_id, raw=False):
group_type = group_type_get(request, group_type_id)
specs = group_type._apiresource.get_keys()
if raw:
return specs
return [GroupTypeSpec(group_type_id, key, value) for
key, value in specs.items()]
@profiler.trace
def group_type_spec_set(request, group_type_id, metadata):
group_type = group_type_get(request, group_type_id)
if not metadata:
return None
return group_type._apiresource.set_keys(metadata)
@profiler.trace
def group_type_spec_unset(request, group_type_id, keys):
group_type = group_type_get(request, group_type_id)
return group_type._apiresource.unset_keys(keys)
@profiler.trace
def group_list(request, search_opts=None):
client = _cinderclient_with_generic_groups(request)
return [Group(g) for g in client.groups.list(search_opts=search_opts)]
@profiler.trace
def group_list_with_vol_type_names(request, search_opts=None):
groups = group_list(request, search_opts)
vol_types = volume_type_list(request)
for group in groups:
group.volume_type_names = []
for vol_type_id in group.volume_types:
for vol_type in vol_types:
if vol_type.id == vol_type_id:
group.volume_type_names.append(vol_type.name)
break
return groups
@profiler.trace
def group_get(request, group_id):
client = _cinderclient_with_generic_groups(request)
group = client.groups.get(group_id)
return Group(group)
@profiler.trace
def group_get_with_vol_type_names(request, group_id):
group = group_get(request, group_id)
vol_types = volume_type_list(request)
group.volume_type_names = []
for vol_type_id in group.volume_types:
for vol_type in vol_types:
if vol_type.id == vol_type_id:
group.volume_type_names.append(vol_type.name)
break
return group
@profiler.trace
def group_create(request, name, group_type, volume_types,
description=None, availability_zone=None):
client = _cinderclient_with_generic_groups(request)
params = {'name': name,
'group_type': group_type,
# cinderclient expects a comma-separated list of volume types.
'volume_types': ','.join(volume_types)}
if description is not None:
params['description'] = description
if availability_zone is not None:
params['availability_zone'] = availability_zone
return Group(client.groups.create(**params))
@profiler.trace
def group_create_from_source(request, name, group_snapshot_id=None,
source_group_id=None, description=None,
user_id=None, project_id=None):
client = _cinderclient_with_generic_groups(request)
return Group(client.groups.create_from_src(
group_snapshot_id, source_group_id, name, description,
user_id, project_id))
@profiler.trace
def group_delete(request, group_id, delete_volumes=False):
client = _cinderclient_with_generic_groups(request)
client.groups.delete(group_id, delete_volumes)
@profiler.trace
def group_update(request, group_id, name=None, description=None,
add_volumes=None, remove_volumes=None):
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if add_volumes:
# cinderclient expects a comma-separated list of volume types.
data['add_volumes'] = ','.join(add_volumes)
if remove_volumes:
# cinderclient expects a comma-separated list of volume types.
data['remove_volumes'] = ','.join(remove_volumes)
client = _cinderclient_with_generic_groups(request)
return client.groups.update(group_id, **data)
def group_snapshot_create(request, group_id, name, description=None):
client = _cinderclient_with_generic_groups(request)
return GroupSnapshot(client.group_snapshots.create(group_id, name,
description))
def group_snapshot_get(request, group_snapshot_id):
client = _cinderclient_with_generic_groups(request)
return GroupSnapshot(client.group_snapshots.get(group_snapshot_id))
def group_snapshot_list(request, search_opts=None):
client = _cinderclient_with_generic_groups(request)
return [GroupSnapshot(s) for s
in client.group_snapshots.list(search_opts=search_opts)]
def group_snapshot_delete(request, group_snapshot_id):
client = _cinderclient_with_generic_groups(request)
client.group_snapshots.delete(group_snapshot_id)
avg_line_length: 32.874491 | max_line_length: 79 | alphanum_fraction: 0.668394

hexsha: 0cd337768335351c2576104180ff8a29305ef3d0 | size: 20,136 | ext: py | lang: Python
max_stars: aiml/PatternMgr.py | gontovnik/python-aiml @ e063641776cf4d46cb60ebe028f1f02a48e56429 | ["BSD-2-Clause"] | count: null
max_issues: aiml/PatternMgr.py | gontovnik/python-aiml @ e063641776cf4d46cb60ebe028f1f02a48e56429 | ["BSD-2-Clause"] | count: null
max_forks: aiml/PatternMgr.py | gontovnik/python-aiml @ e063641776cf4d46cb60ebe028f1f02a48e56429 | ["BSD-2-Clause"] | count: null
'''
This class implements the AIML pattern-matching algorithm described
by Dr. Richard Wallace at the following site:
http://www.alicebot.org/documentation/matching.html
'''
from __future__ import print_function
from collections import namedtuple
import marshal
import pprint
import re
import string
import sys
from .constants import *
MatchResult = namedtuple('MatchResult', 'pattern template')
class PatternMgr:
# special dictionary keys
_UNDERSCORE = '0'
_STAR = '1'
_TEMPLATE = '2'
_THAT = '3'
_TOPIC = '4'
_BOT_NAME = '5'
_CARET = '6'
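    # These one-character keys are chosen so they are unlikely to collide
    # with pattern tokens; per the matching algorithm cited above, `_`
    # matches with higher priority than an exact word, which outranks `*`.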
def __init__(self):
self._root = {}
self._templateCount = 0
self._botName = u"Nameless"
punctuation = r"""`~!@#$%^&*()-_=+[{]}\|;:'",<.>/?"""
self._puncStripRE = re.compile("[" + re.escape(punctuation) + "]")
self._whitespaceRE = re.compile(r"\s+", re.UNICODE)
def numTemplates(self):
"""Return the number of templates currently stored."""
return self._templateCount
def setBotName(self, name):
"""Set the name of the bot, used to match <bot name="name"> tags in
patterns. The name must be a single word!
"""
# Collapse a multi-word name into a single word
        self._botName = unicode(' '.join(name.split()))
def dump(self):
"""Print all learned patterns, for debugging purposes."""
pprint.pprint(self._root)
def save(self, filename):
"""Dump the current patterns to the file specified by filename. To
restore later, use restore().
"""
try:
outFile = open(filename, "wb")
marshal.dump(self._templateCount, outFile)
marshal.dump(self._botName, outFile)
marshal.dump(self._root, outFile)
outFile.close()
except Exception as e:
print( "Error saving PatternMgr to file %s:" % filename )
raise
def restore(self, filename):
"""Restore a previously save()d collection of patterns."""
try:
inFile = open(filename, "rb")
self._templateCount = marshal.load(inFile)
self._botName = marshal.load(inFile)
self._root = marshal.load(inFile)
inFile.close()
except Exception as e:
print( "Error restoring PatternMgr from file %s:" % filename )
raise
def add(self, data, template):
"""Add a [pattern/that/topic] tuple and its corresponding template
to the node tree.
"""
pattern,that,topic = data
# TODO: make sure words contains only legal characters
# (alphanumerics,*,_)
# Navigate through the node tree to the template's location, adding
# nodes if necessary.
node = self._root
for word in pattern.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
elif key == u"^":
key = self._CARET
elif key == u"BOT_NAME":
key = self._BOT_NAME
if key not in node:
node[key] = {}
node = node[key]
# navigate further down, if a non-empty "that" pattern was included
if len(that) > 0:
if self._THAT not in node:
node[self._THAT] = {}
node = node[self._THAT]
for word in that.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# navigate yet further down, if a non-empty "topic" string was included
if len(topic) > 0:
if self._TOPIC not in node:
node[self._TOPIC] = {}
node = node[self._TOPIC]
for word in topic.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
# add the template.
if self._TEMPLATE not in node:
self._templateCount += 1
node[self._TEMPLATE] = template
def match(self, pattern, that, topic):
"""Return the template which is the closest match to pattern. The
'that' parameter contains the bot's previous response. The 'topic'
parameter contains the current topic of conversation.
Returns None if no template is found.
"""
if len(pattern) == 0:
return None
# Mutilate the input. Remove all punctuation and convert the
# text to all caps.
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
# Pass the input off to the recursive call
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
if template is None or patMatch is None:
return None
return MatchResult(patMatch, template)
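    # Usage sketch (hypothetical values): after
    #     mgr.add((u"HELLO *", u"", u""), template)
    # a call like mgr.match(u"Hello there!", u"", u"") returns a MatchResult
    # whose .template is the stored template object.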
def wildcard(self, wildcardType, pattern, that, topic, index):
"""Returns a string, the portion of pattern that was matched by a *.
The 'wildcardType' parameter specifies which type of wildcard to find.
Legal values are:
- 'caret': matches a caret in the main pattern.
- 'star': matches a star in the main pattern.
- 'thatstar': matches a star in the that pattern.
- 'topicstar': matches a star in the topic pattern.
"""
# Mutilate the input. Remove all punctuation and convert the
# text to all caps.
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
input_ = re.sub(self._whitespaceRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
topicInput = re.sub(self._whitespaceRE, " ", topicInput)
# Pass the input off to the recursive pattern-matcher
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
        if template is None:
return ""
# Extract the appropriate portion of the pattern, based on the
# wildcardType argument.
words = None
if wildcardType == 'caret':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = input_.split()
elif wildcardType == 'star':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = input_.split()
elif wildcardType == 'thatstar':
patMatch = patMatch[patMatch.index(self._THAT)+1 : patMatch.index(self._TOPIC)]
words = thatInput.split()
elif wildcardType == 'topicstar':
patMatch = patMatch[patMatch.index(self._TOPIC)+1 :]
words = topicInput.split()
else:
# unknown value
raise ValueError( "wildcardType must be in ['caret', 'star', 'thatstar', 'topicstar']" )
# compare the input string to the matched pattern, word by word.
# At the end of this loop, if foundTheRightWildcard is true, start and
# end will contain the start and end indices (in "words") of
# the substring that the desired wildcard matched.
foundTheRightWildcard = False
start = end = j = numStars = numCarets = k = 0
for i in range(len(words)):
# This condition is true after processing a wildcard
# that ISN'T the one we're looking for.
if i < k:
continue
            # If we've reached the end of the pattern, we're done.
if j == len(patMatch):
break
if not foundTheRightWildcard:
if patMatch[j] in [self._STAR, self._UNDERSCORE]: #we got a star
numStars += 1
if numStars == index:
# This is the star we care about.
foundTheRightWildcard = 'star' in wildcardType
start = i
# Iterate through the rest of the string.
for k in range (i, len(words)):
# If the star is at the end of the pattern,
# we know exactly where it ends.
if j+1 == len (patMatch):
end = len (words)
break
# If the words have started matching the
# pattern again, the star has ended.
if patMatch[j+1] == words[k]:
end = k - 1
i = k
break
elif patMatch[j] in [self._CARET]: #we got a caret
numCarets += 1
if numCarets == index:
# This is the caret we care about.
foundTheRightWildcard = 'caret' in wildcardType
start = i
# Iterate through the rest of the string.
for k in range (i, len(words)):
# If the caret is at the end of the pattern,
# we know exactly where it ends.
if j+1 == len (patMatch):
end = len (words)
break
# If the words have started matching the
# pattern again, the caret has ended.
if patMatch[j+1] == words[k]:
_i = i
end = k - 1
i = k
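                                # a zero-width match (k == i) means the
                                # caret consumed no words; advance j past it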
if _i == i:
j += 1
break
# If we just finished processing the wildcard we cared
# about, we exit the loop early.
if foundTheRightWildcard:
break
# Move to the next element of the pattern.
j += 1
# extract the wildcard words from the original, unmutilated input.
if foundTheRightWildcard:
#print( ' '.join(pattern.split()[start:end+1]) )
if wildcardType == 'caret': return ' '.join(pattern.split()[start:end+1])
elif wildcardType == 'star': return ' '.join(pattern.split()[start:end+1])
elif wildcardType == 'thatstar': return ' '.join(that.split()[start:end+1])
elif wildcardType == 'topicstar': return ' '.join(topic.split()[start:end+1])
else: return u""
def caret(self, caretType, pattern, that, topic, index):
"""Returns a string, the portion of pattern that was matched by a ^.
The 'caretType' parameter specifies which type of caret to find.
Legal values are:
- 'caret': matches a caret in the main pattern.
"""
# Mutilate the input. Remove all punctuation and convert the
# text to all caps.
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
input_ = re.sub(self._whitespaceRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
topicInput = re.sub(self._whitespaceRE, " ", topicInput)
# Pass the input off to the recursive pattern-matcher
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
        if template is None:
return ""
# Extract the appropriate portion of the pattern, based on the
# caretType argument.
words = None
if caretType == 'caret':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = input_.split()
else:
# unknown value
raise ValueError( "caretType must be in ['caret']" )
# compare the input string to the matched pattern, word by word.
# At the end of this loop, if foundTheRightCaret is true, start and
# end will contain the start and end indices (in "words") of
# the substring that the desired caret matched.
foundTheRightCaret = False
start = end = j = numCarets = k = 0
for i in range(len(words)):
# This condition is true after processing a caret
# that ISN'T the one we're looking for.
if i < k:
continue
            # If we've reached the end of the pattern, we're done.
if j == len(patMatch):
break
if not foundTheRightCaret:
if patMatch[j] in [self._CARET]: #we got a caret
numCarets += 1
if numCarets == index:
# This is the caret we care about.
foundTheRightCaret = True
start = i
# Iterate through the rest of the string.
for k in range (i, len(words)):
# If the caret is at the end of the pattern,
# we know exactly where it ends.
if j+1 == len (patMatch):
end = len (words)
break
# If the words have started matching the
# pattern again, the caret has ended.
if patMatch[j+1] == words[k]:
_i = i
end = k - 1
i = k
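                                # a zero-width match (k == i) means the
                                # caret consumed no words; advance j past it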
if _i == i:
j += 1
break
# If we just finished processing the caret we cared
# about, we exit the loop early.
if foundTheRightCaret:
break
# Move to the next element of the pattern.
j += 1
# extract the caret words from the original, unmutilated input.
if foundTheRightCaret:
#print( ' '.join(pattern.split()[start:end+1]) )
if caretType == 'caret': return ' '.join(pattern.split()[start:end+1])
else: return u""
def _match(self, words, thatWords, topicWords, root):
"""Return a tuple (pat, tem) where pat is a list of nodes, starting
at the root and leading to the matching pattern, and tem is the
matched template.
"""
# base-case: if the word list is empty, return the current node's
# template.
if len(words) == 0:
# we're out of words.
pattern = []
template = None
            # Required to make cases where a caret at the end of the pattern work
if self._CARET in root:
pattern, template = self._match(words, thatWords, topicWords, root[self._CARET])
if template is not None:
newPattern = [self._CARET] + pattern
return (newPattern, template)
elif len(thatWords) > 0:
# If thatWords isn't empty, recursively
# pattern-match on the _THAT node with thatWords as words.
try:
pattern, template = self._match(thatWords, [], topicWords, root[self._THAT])
                    if pattern is not None:
pattern = [self._THAT] + pattern
except KeyError:
pattern = []
template = None
elif len(topicWords) > 0:
# If thatWords is empty and topicWords isn't, recursively pattern
# on the _TOPIC node with topicWords as words.
try:
pattern, template = self._match(topicWords, [], [], root[self._TOPIC])
                    if pattern is not None:
pattern = [self._TOPIC] + pattern
except KeyError:
pattern = []
template = None
            if template is None:
# we're totally out of input. Grab the template at this node.
pattern = []
try: template = root[self._TEMPLATE]
except KeyError: template = None
return (pattern, template)
first = words[0]
suffix = words[1:]
        # Check underscore.
        # Note: underscore matching has caused problems in the standard
        # AIML set in the past, but it is enabled here.
if self._UNDERSCORE in root:
# Must include the case where suf is [] in order to handle the case
# where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._UNDERSCORE])
if template is not None:
newPattern = [self._UNDERSCORE] + pattern
return (newPattern, template)
# Check first
if first in root:
pattern, template = self._match(suffix, thatWords, topicWords, root[first])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check bot name
if self._BOT_NAME in root and first == self._botName:
pattern, template = self._match(suffix, thatWords, topicWords, root[self._BOT_NAME])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check caret
if self._CARET in root:
# Must include the case where suf is [] in order to handle the case
# where a ^ is at the end of the pattern.
_suffix = words
for j in range(len(_suffix)+1):
suf = _suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._CARET])
if template is not None:
newPattern = [self._CARET] + pattern
return (newPattern, template)
# check star
if self._STAR in root:
# Must include the case where suf is [] in order to handle the case
# where a * or _ is at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._STAR])
if template is not None:
newPattern = [self._STAR] + pattern
return (newPattern, template)
# No matches were found.
return (None, None)
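# Illustrative usage sketch, not part of the original module. The class name
# and the add() signature are assumptions (modeled on PyAIML's PatternMgr);
# only match() and wildcard() above are taken from this file:
#
#   brain = PatternMgr()                                   # hypothetical name
#   brain.add(("HELLO *", "", ""), "Hi there!")
#   result = brain.match("hello world", "", "")            # MatchResult or None
#   star = brain.wildcard("star", "hello world", "", "", 1)    # -> "world"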
| 42.391579 | 107 | 0.524732 |
aa6d091d63cc3ce556245976c0d4caa732e26680
| 59 |
py
|
Python
|
python/testData/refactoring/introduceVariable/substringBreaksEscapes.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2 |
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/introduceVariable/substringBreaksEscapes.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173 |
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/introduceVariable/substringBreaksEscapes.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2 |
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
print(u"Hel<selection>lo \u00d6sterreich\\!\</selection>n")
| 59 | 59 | 0.745763 |
3573d04395de1ce80942e8535657043b31f6fe5c
| 4,317 |
py
|
Python
|
tests/components/kaleidescape/test_remote.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023 |
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/kaleidescape/test_remote.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710 |
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/kaleidescape/test_remote.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956 |
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for Kaleidescape remote platform."""
from unittest.mock import MagicMock
import pytest
from homeassistant.components.remote import (
ATTR_COMMAND,
DOMAIN as REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from . import MOCK_SERIAL
from tests.common import MockConfigEntry
ENTITY_ID = f"remote.kaleidescape_device_{MOCK_SERIAL}"
async def test_entity(
hass: HomeAssistant,
mock_device: MagicMock,
mock_integration: MockConfigEntry,
) -> None:
"""Test entity attributes."""
assert hass.states.get(ENTITY_ID)
async def test_commands(
hass: HomeAssistant,
mock_device: MagicMock,
mock_integration: MockConfigEntry,
) -> None:
"""Test service calls."""
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
assert mock_device.leave_standby.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
assert mock_device.enter_standby.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["select"]},
blocking=True,
)
assert mock_device.select.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["up"]},
blocking=True,
)
assert mock_device.up.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["down"]},
blocking=True,
)
assert mock_device.down.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["left"]},
blocking=True,
)
assert mock_device.left.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["right"]},
blocking=True,
)
assert mock_device.right.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["cancel"]},
blocking=True,
)
assert mock_device.cancel.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["replay"]},
blocking=True,
)
assert mock_device.replay.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["scan_forward"]},
blocking=True,
)
assert mock_device.scan_forward.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["scan_reverse"]},
blocking=True,
)
assert mock_device.scan_reverse.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["go_movie_covers"]},
blocking=True,
)
assert mock_device.go_movie_covers.call_count == 1
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["menu_toggle"]},
blocking=True,
)
assert mock_device.menu_toggle.call_count == 1
async def test_unknown_command(
hass: HomeAssistant,
mock_device: MagicMock,
mock_integration: MockConfigEntry,
) -> None:
"""Test service calls."""
with pytest.raises(HomeAssistantError) as err:
await hass.services.async_call(
REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_COMMAND: ["bad"]},
blocking=True,
)
assert str(err.value) == "bad is not a known command"
| 27.496815 | 81 | 0.672226 |
fb5ffab2181ac3697c207b57d70b1bdb18cc82ec
| 305 |
py
|
Python
|
data/multilingual/Latn.ORH/Serif_12/pdf_to_json_test_Latn.ORH_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1 |
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.ORH/Serif_12/pdf_to_json_test_Latn.ORH_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.ORH/Serif_12/pdf_to_json_test_Latn.ORH_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.ORH/Serif_12/udhr_Latn.ORH_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5 | 75 | 0.813115 |
d9488d43fdcb3dcbccf19e995109a29e77099401
| 1,270 |
py
|
Python
|
wids-datathon-2020/wids_datathon_2020/data/unzip_dataset.py
|
iainwo/kaggle
|
3d36393d6cb14d118a39bbe742cada5914e7fb9b
|
[
"MIT"
] | 2 |
2020-07-30T15:51:12.000Z
|
2020-11-11T23:01:25.000Z
|
wids-datathon-2020/wids_datathon_2020/data/unzip_dataset.py
|
iainwo/kaggle
|
3d36393d6cb14d118a39bbe742cada5914e7fb9b
|
[
"MIT"
] | null | null | null |
wids-datathon-2020/wids_datathon_2020/data/unzip_dataset.py
|
iainwo/kaggle
|
3d36393d6cb14d118a39bbe742cada5914e7fb9b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import zipfile
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True), default='data/external/widsdatathon2020.zip')
@click.argument('output_filepath', type=click.Path(exists=True), default='data/raw/')
def unzip_dataset(input_filepath, output_filepath):
""" Unzip a dataset from external data (data/external) into
the raw staging directory (data/raw).
"""
logger = logging.getLogger(__name__)
logger.info(f'unzipping file {input_filepath} to output filepath {output_filepath}')
DATASET = Path.cwd().joinpath(input_filepath)
with zipfile.ZipFile(DATASET, 'r') as zip_ref:
zip_ref.extractall(output_filepath)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
unzip_dataset()
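# Illustrative invocations; both arguments fall back to the click defaults
# declared above:
#   python unzip_dataset.py
#   python unzip_dataset.py data/external/widsdatathon2020.zip data/raw/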
| 34.324324 | 109 | 0.725197 |
278e6954791732ac3e0f90cdea3932d7e8c32719
| 499 |
py
|
Python
|
backend/api/views/__init__.py
|
EagleC0318/disfactory-backend
|
d0f3b5bf24f3a5ca1f33696f9c47d354706089b9
|
[
"MIT"
] | 35 |
2020-01-02T10:52:49.000Z
|
2022-03-18T06:01:15.000Z
|
backend/api/views/__init__.py
|
EagleC0318/disfactory-backend
|
d0f3b5bf24f3a5ca1f33696f9c47d354706089b9
|
[
"MIT"
] | 348 |
2019-10-09T12:58:42.000Z
|
2022-03-30T14:17:51.000Z
|
backend/api/views/__init__.py
|
EagleC0318/disfactory-backend
|
d0f3b5bf24f3a5ca1f33696f9c47d354706089b9
|
[
"MIT"
] | 19 |
2019-10-09T12:51:11.000Z
|
2021-12-12T01:02:32.000Z
|
from .factories_cr import get_nearby_or_create_factories, get_factory_by_sectcode
from .factories_u import update_factory_attribute
from .factory_report_record_r import get_factory_report
from .image_c import post_image_url
from .factory_image_c import post_factory_image_url
from .statistics_r import get_factories_count_by_townname
from .statistics_r import get_images_count_by_townname
from .statistics_r import get_report_records_count_by_townname
from .statistics_r import get_statistics_total
| 49.9 | 81 | 0.905812 |
6665937ebea1948cdaf0950151aab7e84bd5ef0a
| 583 |
py
|
Python
|
socks5-node-docker/server.py
|
14avengers/sentinel
|
825768d2242ad28896c41684bc08e8527cdf2f30
|
[
"MIT"
] | 342 |
2017-08-21T20:12:56.000Z
|
2022-03-19T17:58:25.000Z
|
socks5-node-docker/server.py
|
14avengers/sentinel
|
825768d2242ad28896c41684bc08e8527cdf2f30
|
[
"MIT"
] | 57 |
2017-11-13T11:16:47.000Z
|
2022-03-01T13:54:31.000Z
|
socks5-node-docker/server.py
|
14avengers/sentinel
|
825768d2242ad28896c41684bc08e8527cdf2f30
|
[
"MIT"
] | 72 |
2017-11-23T05:13:24.000Z
|
2022-02-25T14:18:33.000Z
|
# coding=utf-8
import json
import falcon
from sentinel.server import GetSockCreds
from sentinel.server import Token
from sentinel.utils import JSONTranslator
class Up(object):
def on_post(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({'status': 'UP'})
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({'status': 'UP'})
server = falcon.API(middleware=[JSONTranslator()])
server.add_route('/', Up())
server.add_route('/token', Token())
server.add_route('/creds', GetSockCreds())
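# falcon.API instances are plain WSGI applications, so any WSGI server can
# host this; a typical (assumed) invocation:
#   gunicorn server:server --bind 0.0.0.0:8000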
| 22.423077 | 50 | 0.684391 |
20b850714f7f0d6ef7c215e037f595ad05eaade7
| 13,847 |
py
|
Python
|
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
abbasidaniyal/Paddle
|
c3527f5526ee96398760cbef11d7de48f41fe998
|
[
"Apache-2.0"
] | 1 |
2020-03-07T16:05:22.000Z
|
2020-03-07T16:05:22.000Z
|
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
abbasidaniyal/Paddle
|
c3527f5526ee96398760cbef11d7de48f41fe998
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
abbasidaniyal/Paddle
|
c3527f5526ee96398760cbef11d7de48f41fe998
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestSumOp(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMeanOp(OpTest):
def setUp(self):
self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
self.attrs = {'dim': [1]}
self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMaxOp(OpTest):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMinOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestProdOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].prod(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestAllOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].all()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAllOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].all(axis=1)}
def test_check_output(self):
self.check_output()
class TestAllOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=1), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAnyOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].any()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self):
self.check_output()
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=1), axis=1)
}
def test_check_output(self):
self.check_output()
class Test1DReduce(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(120).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test2DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [0]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
class Test2DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce2(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [-2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce3(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1, 2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class TestKeepDimReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
class TestReduceAll(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
self.attrs = {'reduce_all': True}
self.outputs = {'Out': self.inputs['X'].sum()}
## reduction in multi dims
class TestReduceMeanOpMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]}
self.outputs = {'Out': self.inputs['X'].mean(axis=(1, 2))}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceMaxOpMultiAxises(OpTest):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestReduceMinOpMultiAxises(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestKeepDimReduceSumMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1], 'keep_dim': True}
self.outputs = {
'Out':
self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithDimOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((10, 1, 1)).astype("float64")}
self.attrs = {'dim': [1, 2], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithNumelOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((1, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': False}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=False)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceMeanWithDimOne(OpTest):
def setUp(self):
self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random((10, 1, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': False}
self.outputs = {
'Out': self.inputs['X'].mean(
axis=tuple(self.attrs['dim']), keepdims=False)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceMeanWithNumelOne(OpTest):
def setUp(self):
self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random((1, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].mean(
axis=tuple(self.attrs['dim']), keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumAll(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((1, 1, 1)).astype("float64")}
self.attrs = {'reduce_all': True, 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum()}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test1DReduceWithAxes1(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(1).astype("float64")}
self.attrs = {'dim': [0], 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_sum_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
# The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)
class TestReduceMeanOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_mean_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.reduce_mean, x1)
# The input dtype of reduce_mean_op must be float32 or float64 or int32 or int64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.reduce_mean, x2)
if __name__ == '__main__':
unittest.main()
| 31.257336 | 94 | 0.583014 |
27f0d397ac683c61ef2e8f9f0ed531f2db303881
| 984 |
py
|
Python
|
metrics/sql2text/evaluator.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 191 |
2021-12-14T11:33:09.000Z
|
2022-03-31T09:20:41.000Z
|
metrics/sql2text/evaluator.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 7 |
2022-01-20T05:41:51.000Z
|
2022-03-20T06:43:22.000Z
|
metrics/sql2text/evaluator.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 22 |
2021-12-14T12:59:42.000Z
|
2022-03-29T03:45:51.000Z
|
# encoding=utf8
from third_party.BLEC.Spider import BLECSpider
class EvaluateTool(object):
def __init__(self, args):
self.args = args
self.blec_test = BLECSpider(template_path="third_party/BLEC/template_to_names_test.json")
self.blec_dev = BLECSpider(template_path="third_party/BLEC/template_to_names_dev.json")
def evaluate(self, preds, golds, section):
if section == 'test':
blec = self.blec_test
elif section == 'dev':
blec = self.blec_dev
else:
raise ValueError()
summary = {}
sqls = [item["text_in"] for item in golds]
assert len(preds) == len(sqls)
true = []
for sql, pred in zip(sqls, preds):
if len(blec.evaluate(pred, sql)) == 0:
true.append(1)
else:
true.append(0)
summary["blec"] = 1.0 * sum(true) / len(preds)
return summary
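# Illustrative usage sketch; the args value and data below are made up, and
# golds items only need the "text_in" key used above:
#   tool = EvaluateTool(args=None)
#   preds = ["How many singers do we have?"]
#   golds = [{"text_in": "SELECT count(*) FROM singer"}]
#   print(tool.evaluate(preds, golds, section="dev"))    # e.g. {'blec': 1.0}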
| 28.114286 | 98 | 0.555894 |
7e2455538bbfba7ac7180462d8f7ca287056b352
| 422 |
py
|
Python
|
py_selenim/sel21_cookies.py
|
MdNazmul9/PYTHON_CODE_ALL
|
75046943f1bb6b4a010955b23bfe3f01cd08a473
|
[
"MIT"
] | null | null | null |
py_selenim/sel21_cookies.py
|
MdNazmul9/PYTHON_CODE_ALL
|
75046943f1bb6b4a010955b23bfe3f01cd08a473
|
[
"MIT"
] | null | null | null |
py_selenim/sel21_cookies.py
|
MdNazmul9/PYTHON_CODE_ALL
|
75046943f1bb6b4a010955b23bfe3f01cd08a473
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://www.amazon.in/")
cookies = driver.get_cookies()
print(len(cookies))
print(cookies)
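# add a custom cookie, then re-read and confirm the count increased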
cookie = {'name':'MyCookie','value':'12346790'}
driver.add_cookie(cookie)
cookies = driver.get_cookies()
print(len(cookies))
print(cookies)
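# delete the custom cookie by name, then re-read and confirm the count decreased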
driver.delete_cookie('MyCookie')
cookies = driver.get_cookies()
print(len(cookies))
print(cookies)
| 22.210526 | 48 | 0.725118 |
e3e7baed8a2024acfef94c8726d5d7957b127e2a
| 2,593 |
py
|
Python
|
deep_rl/utils/logger.py
|
Louis-Bagot/DeepRL
|
0b152c52bbba90362c8276c223fee3f9a464eb32
|
[
"MIT"
] | null | null | null |
deep_rl/utils/logger.py
|
Louis-Bagot/DeepRL
|
0b152c52bbba90362c8276c223fee3f9a464eb32
|
[
"MIT"
] | null | null | null |
deep_rl/utils/logger.py
|
Louis-Bagot/DeepRL
|
0b152c52bbba90362c8276c223fee3f9a464eb32
|
[
"MIT"
] | null | null | null |
#######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from tensorboardX import SummaryWriter
import os
import numpy as np
import torch
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
from .misc import *
local = False
LOGDIR = './' if local else '/project/'
def get_logger(tag='default', log_level=0):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if tag is not None:
fh = logging.FileHandler(LOGDIR+'log/%s-%s.txt' % (tag, get_time_str()))
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s'))
fh.setLevel(logging.INFO)
logger.addHandler(fh)
return Logger(logger, LOGDIR+'tf_log/logger-%s-%s' % (tag, get_time_str()), log_level)
class Logger(object):
def __init__(self, vanilla_logger, log_dir, log_level=0):
self.log_level = log_level
self.writer = None
if vanilla_logger is not None:
self.info = vanilla_logger.info
self.debug = vanilla_logger.debug
self.warning = vanilla_logger.warning
self.all_steps = {}
self.log_dir = log_dir
def lazy_init_writer(self):
if self.writer is None:
self.writer = SummaryWriter(self.log_dir)
def to_numpy(self, v):
if isinstance(v, torch.Tensor):
v = v.cpu().detach().numpy()
return v
def get_step(self, tag):
if tag not in self.all_steps:
self.all_steps[tag] = 0
step = self.all_steps[tag]
self.all_steps[tag] += 1
return step
def add_scalar(self, tag, value, step=None, log_level=0):
self.lazy_init_writer()
if log_level > self.log_level:
return
value = self.to_numpy(value)
if step is None:
step = self.get_step(tag)
if np.isscalar(value):
value = np.asarray([value])
self.writer.add_scalar(tag, value, step)
def add_histogram(self, tag, values, step=None, log_level=0):
self.lazy_init_writer()
if log_level > self.log_level:
return
values = self.to_numpy(values)
if step is None:
step = self.get_step(tag)
self.writer.add_histogram(tag, values, step)
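# Illustrative usage sketch (tag and values are made up):
#   logger = get_logger(tag='dqn')
#   logger.info('starting run')
#   logger.add_scalar('loss', 0.42)    # step auto-increments per tag
#   logger.add_scalar('loss', 0.40)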
| 34.118421 | 97 | 0.577324 |
640739f65e05b15fb844614796c5327c7e4025c3
| 140 |
py
|
Python
|
whilesimple.py
|
Yeeeeeeeha/123
|
9e16c8edc32008e7567bf6b6747bfa2b9e734e1a
|
[
"MIT"
] | null | null | null |
whilesimple.py
|
Yeeeeeeeha/123
|
9e16c8edc32008e7567bf6b6747bfa2b9e734e1a
|
[
"MIT"
] | null | null | null |
whilesimple.py
|
Yeeeeeeeha/123
|
9e16c8edc32008e7567bf6b6747bfa2b9e734e1a
|
[
"MIT"
] | null | null | null |
# while loop
answer = None
while answer != 'Good':
    answer = input('How are you?')
    print('Tell a joke')
print('All ok.')
| 12.727273 | 31 | 0.614286 |
9feac8f6d395929b12d07bde93240217d81357ff
| 1,366 |
py
|
Python
|
gui/kivy/uix/dialogs/question.py
|
Durendal/electrum-rubycoin
|
a3c55806c29a2c4c846ebefefd57e47b9a102af9
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/question.py
|
Durendal/electrum-rubycoin
|
a3c55806c29a2c4c846ebefefd57e47b9a102af9
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/question.py
|
Durendal/electrum-rubycoin
|
a3c55806c29a2c4c846ebefefd57e47b9a102af9
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from electrum_rubycoin_gui.kivy.i18n import _
Builder.load_string('''
<Question@Popup>
id: popup
title: ''
message: ''
size_hint: 0.8, 0.5
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Label:
id: label
text: root.message
text_size: self.width, None
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: _('No')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(False)
popup.dismiss()
Button:
text: _('Yes')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(True)
popup.dismiss()
''')
class Question(Factory.Popup):
def __init__(self, msg, callback):
Factory.Popup.__init__(self)
self.title = _('Question')
self.message = msg
self.callback = callback
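# Illustrative usage sketch (assumes a running Kivy app; the message text is
# made up):
#   def on_answer(confirmed):
#       print('confirmed' if confirmed else 'declined')
#   Question(_('Broadcast this transaction?'), on_answer).open()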
| 25.296296 | 45 | 0.537335 |
560f7145f862e6de8737ab29d9a455cf64e9fc4b
| 4,342 |
py
|
Python
|
src/api/resources/Resource.py
|
girardinsamuel/api
|
6e4ec0a991fb2df1a9e23e8676a93bebfd0b53d5
|
[
"MIT"
] | 12 |
2018-09-23T02:37:57.000Z
|
2021-11-08T08:50:56.000Z
|
src/api/resources/Resource.py
|
girardinsamuel/api
|
6e4ec0a991fb2df1a9e23e8676a93bebfd0b53d5
|
[
"MIT"
] | 12 |
2018-10-01T17:10:59.000Z
|
2021-04-25T16:29:14.000Z
|
src/api/resources/Resource.py
|
girardinsamuel/api
|
6e4ec0a991fb2df1a9e23e8676a93bebfd0b53d5
|
[
"MIT"
] | 8 |
2018-09-23T08:08:07.000Z
|
2021-04-25T18:37:05.000Z
|
import json
from masonite.request import Request
from masonite.routes import BaseHttpRoute
from ..exceptions import (ApiNotAuthenticated, ExpiredToken, InvalidToken,
NoApiTokenFound, PermissionScopeDenied,
RateLimitReached)
class Resource(BaseHttpRoute):
"""Resource class that will use a similar structure as a Route class.
"""
model = None
methods = ['create', 'index', 'show', 'update', 'delete']
prefix = '/api'
required_domain = None
without = []
def __init__(self, url=None, method_type='GET'):
self.list_middleware = []
self.route_url = url
self.method_type = method_type
self.named_route = None
self.model.__hidden__ = self.without
if url and method_type:
self._compiled_url = self.compile_route_to_regex()
def routes(self):
routes = []
if 'create' in self.methods:
routes.append(self.__class__(self.route_url, 'POST').middleware(*self.list_middleware))
if 'index' in self.methods:
routes.append(self.__class__(self.route_url, 'GET').middleware(*self.list_middleware))
if 'show' in self.methods:
routes.append(self.__class__(self.route_url + '/@id', 'GET').middleware(*self.list_middleware))
if 'update' in self.methods:
routes.append(self.__class__(self.route_url + '/@id', 'PUT').middleware(*self.list_middleware))
if 'delete' in self.methods:
routes.append(self.__class__(self.route_url + '/@id', 'DELETE').middleware(*self.list_middleware))
return routes
def get_response(self):
"""Gets the response that should be returned from this resource
"""
response = None
if hasattr(self, 'authenticate'):
# Get a response from the authentication method if one exists
response = self.run_authentication()
if hasattr(self, 'scope'):
# Get a response from the authentication method if one exists
if not response:
response = self.run_scope()
# If the authenticate method did not return a response, continue on to one of the CRUD responses
if not response:
if 'POST' in self.method_type:
response = self.request.app().resolve(getattr(self, 'create'))
elif 'GET' in self.method_type and '@' in self.route_url:
response = self.request.app().resolve(getattr(self, 'show'))
elif 'GET' in self.method_type:
response = self.request.app().resolve(getattr(self, 'index'))
elif 'PUT' in self.method_type or 'PATCH' in self.method_type:
response = self.request.app().resolve(getattr(self, 'update'))
elif 'DELETE' in self.method_type:
response = self.request.app().resolve(getattr(self, 'delete'))
        # If the resource needs its own serializer method
if hasattr(self, 'serialize'):
response = self.serialize(response)
        # If the resource needs its own filter method
if hasattr(self, 'filter'):
response = self.filter(response)
return response
def load_request(self, request):
self.request = request
return self
def create(self):
"""Logic to create data from a given model
"""
try:
record = self.model.create(self.request.all())
except Exception as e:
return {'error': str(e)}
return record
def index(self):
"""Logic to read data from several models
"""
return self.model.all()
def show(self, request: Request):
"""Logic to read data from 1 model
"""
return self.model.find(request.param('id'))
def update(self, request: Request):
"""Logic to update data from a given model
"""
record = self.model.find(request.param('id'))
record.update(request.all())
return record
def delete(self, request: Request):
"""Logic to delete data from a given model
"""
record = self.model.find(request.param('id'))
if record:
record.delete()
return record
return {'error': 'Record does not exist'}
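# Illustrative usage sketch; the User model and field names are assumptions,
# and only attributes defined on Resource above are overridden:
#   class UserResource(Resource):
#       model = User
#       methods = ['index', 'show', 'create']
#       without = ['password']
#   ROUTES = UserResource('/users').routes()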
| 35.884298 | 110 | 0.603178 |
42bad0b2a19169902438dd8d815c988ae5f2cdf3
| 158,123 |
py
|
Python
|
sympy/solvers/tests/test_ode.py
|
GaurangTandon/sympy
|
e3b2c4e302c05d4a438fdebb49d348010b71c882
|
[
"BSD-3-Clause"
] | 1 |
2020-04-09T14:19:51.000Z
|
2020-04-09T14:19:51.000Z
|
sympy/solvers/tests/test_ode.py
|
mohitacecode/sympy
|
1e536959853e86f924855923de70df52e377d991
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/solvers/tests/test_ode.py
|
mohitacecode/sympy
|
1e536959853e86f924855923de70df52e377d991
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import (acos, acosh, asinh, atan, cos, Derivative, diff, dsolve,
Dummy, Eq, Ne, erf, erfi, exp, Function, I, Integral, LambertW, log, O, pi,
Rational, rootof, S, simplify, sin, sqrt, Subs, Symbol, tan, asin, sinh,
Piecewise, symbols, Poly, sec, Ei)
from sympy.solvers.ode import (_undetermined_coefficients_match,
checkodesol, classify_ode, classify_sysode, constant_renumber,
constantsimp, homogeneous_order, infinitesimals, checkinfsol,
checksysodesol, solve_ics, dsolve, get_numbered_constants)
from sympy.solvers.deutils import ode_order
from sympy.utilities.pytest import XFAIL, skip, raises, slow, ON_TRAVIS
C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10 = symbols('C0:11')
u, x, y, z = symbols('u,x:z', real=True)
f = Function('f')
g = Function('g')
h = Function('h')
# Note: the tests below may fail (but still be correct) if the ODE solver,
# the integral engine, solve(), or even simplify() changes. Also, in
# differently formatted solutions, the arbitrary constants might not be
# equal. Using specific hints in tests can help to avoid this.
# Tests of order higher than 1 should run the solutions through
# constant_renumber because it will normalize it (constant_renumber causes
# dsolve() to return different results on different machines)
def test_linear_2eq_order1():
x, y, z = symbols('x, y, z', cls=Function)
    k, l, m, n = symbols('k, l, m, n', integer=True)
t = Symbol('t')
x0, y0 = symbols('x0, y0', cls=Function)
eq1 = (Eq(diff(x(t),t), 9*y(t)), Eq(diff(y(t),t), 12*x(t)))
sol1 = [Eq(x(t), 9*C1*exp(6*sqrt(3)*t) + 9*C2*exp(-6*sqrt(3)*t)), \
Eq(y(t), 6*sqrt(3)*C1*exp(6*sqrt(3)*t) - 6*sqrt(3)*C2*exp(-6*sqrt(3)*t))]
assert checksysodesol(eq1, sol1) == (True, [0, 0])
eq2 = (Eq(diff(x(t),t), 2*x(t) + 4*y(t)), Eq(diff(y(t),t), 12*x(t) + 41*y(t)))
sol2 = [Eq(x(t), 4*C1*exp(t*(sqrt(1713)/2 + S(43)/2)) + 4*C2*exp(t*(-sqrt(1713)/2 + S(43)/2))), \
Eq(y(t), C1*(S(39)/2 + sqrt(1713)/2)*exp(t*(sqrt(1713)/2 + S(43)/2)) + \
C2*(-sqrt(1713)/2 + S(39)/2)*exp(t*(-sqrt(1713)/2 + S(43)/2)))]
assert checksysodesol(eq2, sol2) == (True, [0, 0])
eq3 = (Eq(diff(x(t),t), x(t) + y(t)), Eq(diff(y(t),t), -2*x(t) + 2*y(t)))
sol3 = [Eq(x(t), (C1*cos(sqrt(7)*t/2) + C2*sin(sqrt(7)*t/2))*exp(3*t/2)), \
Eq(y(t), (C1*(-sqrt(7)*sin(sqrt(7)*t/2)/2 + cos(sqrt(7)*t/2)/2) + \
C2*(sin(sqrt(7)*t/2)/2 + sqrt(7)*cos(sqrt(7)*t/2)/2))*exp(3*t/2))]
assert checksysodesol(eq3, sol3) == (True, [0, 0])
eq4 = (Eq(diff(x(t),t), x(t) + y(t) + 9), Eq(diff(y(t),t), 2*x(t) + 5*y(t) + 23))
sol4 = [Eq(x(t), C1*exp(t*(sqrt(6) + 3)) + C2*exp(t*(-sqrt(6) + 3)) - S(22)/3), \
Eq(y(t), C1*(2 + sqrt(6))*exp(t*(sqrt(6) + 3)) + C2*(-sqrt(6) + 2)*exp(t*(-sqrt(6) + 3)) - S(5)/3)]
assert checksysodesol(eq4, sol4) == (True, [0, 0])
eq5 = (Eq(diff(x(t),t), x(t) + y(t) + 81), Eq(diff(y(t),t), -2*x(t) + y(t) + 23))
sol5 = [Eq(x(t), (C1*cos(sqrt(2)*t) + C2*sin(sqrt(2)*t))*exp(t) - S(58)/3), \
Eq(y(t), (-sqrt(2)*C1*sin(sqrt(2)*t) + sqrt(2)*C2*cos(sqrt(2)*t))*exp(t) - S(185)/3)]
assert checksysodesol(eq5, sol5) == (True, [0, 0])
eq6 = (Eq(diff(x(t),t), 5*t*x(t) + 2*y(t)), Eq(diff(y(t),t), 2*x(t) + 5*t*y(t)))
sol6 = [Eq(x(t), (C1*exp(2*t) + C2*exp(-2*t))*exp(S(5)/2*t**2)), \
Eq(y(t), (C1*exp(2*t) - C2*exp(-2*t))*exp(S(5)/2*t**2))]
s = dsolve(eq6)
assert checksysodesol(eq6, sol6) == (True, [0, 0])
eq7 = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))
sol7 = [Eq(x(t), (C1*cos((t**3)/3) + C2*sin((t**3)/3))*exp(S(5)/2*t**2)), \
Eq(y(t), (-C1*sin((t**3)/3) + C2*cos((t**3)/3))*exp(S(5)/2*t**2))]
assert checksysodesol(eq7, sol7) == (True, [0, 0])
eq8 = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + (5*t+9*t**2)*y(t)))
sol8 = [Eq(x(t), (C1*exp((sqrt(77)/2 + S(9)/2)*(t**3)/3) + \
C2*exp((-sqrt(77)/2 + S(9)/2)*(t**3)/3))*exp(S(5)/2*t**2)), \
Eq(y(t), (C1*(sqrt(77)/2 + S(9)/2)*exp((sqrt(77)/2 + S(9)/2)*(t**3)/3) + \
C2*(-sqrt(77)/2 + S(9)/2)*exp((-sqrt(77)/2 + S(9)/2)*(t**3)/3))*exp(S(5)/2*t**2))]
assert checksysodesol(eq8, sol8) == (True, [0, 0])
eq10 = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), (1-t**2)*x(t) + (5*t+9*t**2)*y(t)))
sol10 = [Eq(x(t), C1*x0(t) + C2*x0(t)*Integral(t**2*exp(Integral(5*t, t))*exp(Integral(9*t**2 + 5*t, t))/x0(t)**2, t)), \
Eq(y(t), C1*y0(t) + C2*(y0(t)*Integral(t**2*exp(Integral(5*t, t))*exp(Integral(9*t**2 + 5*t, t))/x0(t)**2, t) + \
exp(Integral(5*t, t))*exp(Integral(9*t**2 + 5*t, t))/x0(t)))]
s = dsolve(eq10)
assert s == sol10 # too complicated to test with subs and simplify
def test_linear_2eq_order1_nonhomog_linear():
e = [Eq(diff(f(x), x), f(x) + g(x) + 5*x),
Eq(diff(g(x), x), f(x) - g(x))]
raises(NotImplementedError, lambda: dsolve(e))
def test_linear_2eq_order1_nonhomog():
# Note: once implemented, add some tests esp. with resonance
e = [Eq(diff(f(x), x), f(x) + exp(x)),
Eq(diff(g(x), x), f(x) + g(x) + x*exp(x))]
raises(NotImplementedError, lambda: dsolve(e))
def test_linear_2eq_order1_type2_degen():
e = [Eq(diff(f(x), x), f(x) + 5),
Eq(diff(g(x), x), f(x) + 7)]
s1 = [Eq(f(x), C1*exp(x) - 5), Eq(g(x), C1*exp(x) - C2 + 2*x - 5)]
assert checksysodesol(e, s1) == (True, [0, 0])
def test_dsolve_linear_2eq_order1_diag_triangular():
e = [Eq(diff(f(x), x), f(x)),
Eq(diff(g(x), x), g(x))]
s1 = [Eq(f(x), C1*exp(x)), Eq(g(x), C2*exp(x))]
assert checksysodesol(e, s1) == (True, [0, 0])
e = [Eq(diff(f(x), x), 2*f(x)),
Eq(diff(g(x), x), 3*f(x) + 7*g(x))]
s1 = [Eq(f(x), -5*C2*exp(2*x)),
Eq(g(x), 5*C1*exp(7*x) + 3*C2*exp(2*x))]
assert checksysodesol(e, s1) == (True, [0, 0])
def test_sysode_linear_2eq_order1_type1_D_lt_0():
e = [Eq(diff(f(x), x), -9*I*f(x) - 4*g(x)),
Eq(diff(g(x), x), -4*I*g(x))]
s1 = [Eq(f(x), -4*C1*exp(-4*I*x) - 4*C2*exp(-9*I*x)), \
Eq(g(x), 5*I*C1*exp(-4*I*x))]
assert checksysodesol(e, s1) == (True, [0, 0])
def test_sysode_linear_2eq_order1_type1_D_lt_0_b_eq_0():
e = [Eq(diff(f(x), x), -9*I*f(x)),
Eq(diff(g(x), x), -4*I*g(x))]
s1 = [Eq(f(x), -5*I*C2*exp(-9*I*x)), Eq(g(x), 5*I*C1*exp(-4*I*x))]
assert checksysodesol(e, s1) == (True, [0, 0])
def test_sysode_linear_2eq_order1_many_zeros():
t = Symbol('t')
corner_cases = [(0, 0, 0, 0), (1, 0, 0, 0), (0, 1, 0, 0),
(0, 0, 1, 0), (0, 0, 0, 1), (1, 0, 0, I),
(I, 0, 0, -I), (0, I, 0, 0), (0, I, I, 0)]
s1 = [[Eq(f(t), C1), Eq(g(t), C2)],
[Eq(f(t), C1*exp(t)), Eq(g(t), -C2)],
[Eq(f(t), C1 + C2*t), Eq(g(t), C2)],
[Eq(f(t), C2), Eq(g(t), C1 + C2*t)],
[Eq(f(t), -C2), Eq(g(t), C1*exp(t))],
[Eq(f(t), C1*(1 - I)*exp(t)), Eq(g(t), C2*(-1 + I)*exp(I*t))],
[Eq(f(t), 2*I*C1*exp(I*t)), Eq(g(t), -2*I*C2*exp(-I*t))],
[Eq(f(t), I*C1 + I*C2*t), Eq(g(t), C2)],
[Eq(f(t), I*C1*exp(I*t) + I*C2*exp(-I*t)), \
Eq(g(t), I*C1*exp(I*t) - I*C2*exp(-I*t))]
]
for r, sol in zip(corner_cases, s1):
eq = [Eq(diff(f(t), t), r[0]*f(t) + r[1]*g(t)),
Eq(diff(g(t), t), r[2]*f(t) + r[3]*g(t))]
assert checksysodesol(eq, sol) == (True, [0, 0])
def test_dsolve_linsystem_symbol_piecewise():
u = Symbol('u') # XXX it's more complicated with real u
eq = (Eq(diff(f(x), x), 2*f(x) + g(x)),
Eq(diff(g(x), x), u*f(x)))
s1 = [Eq(f(x), Piecewise((C1*exp(x*(sqrt(4*u + 4)/2 + 1)) +
C2*exp(x*(-sqrt(4*u + 4)/2 + 1)), Ne(4*u + 4, 0)), ((C1 + C2*(x +
Piecewise((0, Eq(sqrt(4*u + 4)/2 + 1, 2)), (1/(-sqrt(4*u + 4)/2 + 1),
True))))*exp(x*(sqrt(4*u + 4)/2 + 1)), True))), Eq(g(x),
Piecewise((C1*(sqrt(4*u + 4)/2 - 1)*exp(x*(sqrt(4*u + 4)/2 + 1)) +
C2*(-sqrt(4*u + 4)/2 - 1)*exp(x*(-sqrt(4*u + 4)/2 + 1)), Ne(4*u + 4,
0)), ((C1*(sqrt(4*u + 4)/2 - 1) + C2*(x*(sqrt(4*u + 4)/2 - 1) +
Piecewise((1, Eq(sqrt(4*u + 4)/2 + 1, 2)), (0,
True))))*exp(x*(sqrt(4*u + 4)/2 + 1)), True)))]
s = dsolve(eq)
assert s == s1
s = [(l.lhs, l.rhs) for l in s]
for v in [0, 7, -42, 5*I, 3 + 4*I]:
assert eq[0].subs(s).subs(u, v).doit().simplify()
assert eq[1].subs(s).subs(u, v).doit().simplify()
# example from https://groups.google.com/d/msg/sympy/xmzoqW6tWaE/sf0bgQrlCgAJ
i, r1, c1, r2, c2, t = symbols('i, r1, c1, r2, c2, t')
x1 = Function('x1')
x2 = Function('x2')
eq1 = r1*c1*Derivative(x1(t), t) + x1(t) - x2(t) - r1*i
eq2 = r2*c1*Derivative(x1(t), t) + r2*c2*Derivative(x2(t), t) + x2(t) - r2*i
sol = dsolve((eq1, eq2))
# it's a complicated formula, was previously a TypeError
assert all(s.has(Piecewise) for s in sol)
def test_linear_2eq_order2():
x, y, z = symbols('x, y, z', cls=Function)
    k, l, m, n = symbols('k, l, m, n', integer=True)
t, l = symbols('t, l')
x0, y0 = symbols('x0, y0', cls=Function)
eq1 = (Eq(diff(x(t),t,t), 5*x(t) + 43*y(t)), Eq(diff(y(t),t,t), x(t) + 9*y(t)))
sol1 = [Eq(x(t), 43*C1*exp(t*rootof(l**4 - 14*l**2 + 2, 0)) + 43*C2*exp(t*rootof(l**4 - 14*l**2 + 2, 1)) + \
43*C3*exp(t*rootof(l**4 - 14*l**2 + 2, 2)) + 43*C4*exp(t*rootof(l**4 - 14*l**2 + 2, 3))), \
Eq(y(t), C1*(rootof(l**4 - 14*l**2 + 2, 0)**2 - 5)*exp(t*rootof(l**4 - 14*l**2 + 2, 0)) + \
C2*(rootof(l**4 - 14*l**2 + 2, 1)**2 - 5)*exp(t*rootof(l**4 - 14*l**2 + 2, 1)) + \
C3*(rootof(l**4 - 14*l**2 + 2, 2)**2 - 5)*exp(t*rootof(l**4 - 14*l**2 + 2, 2)) + \
C4*(rootof(l**4 - 14*l**2 + 2, 3)**2 - 5)*exp(t*rootof(l**4 - 14*l**2 + 2, 3)))]
assert dsolve(eq1) == sol1
eq2 = (Eq(diff(x(t),t,t), 8*x(t)+3*y(t)+31), Eq(diff(y(t),t,t), 9*x(t)+7*y(t)+12))
sol2 = [Eq(x(t), 3*C1*exp(t*rootof(l**4 - 15*l**2 + 29, 0)) + 3*C2*exp(t*rootof(l**4 - 15*l**2 + 29, 1)) + \
3*C3*exp(t*rootof(l**4 - 15*l**2 + 29, 2)) + 3*C4*exp(t*rootof(l**4 - 15*l**2 + 29, 3)) - S(181)/29), \
Eq(y(t), C1*(rootof(l**4 - 15*l**2 + 29, 0)**2 - 8)*exp(t*rootof(l**4 - 15*l**2 + 29, 0)) + \
C2*(rootof(l**4 - 15*l**2 + 29, 1)**2 - 8)*exp(t*rootof(l**4 - 15*l**2 + 29, 1)) + \
C3*(rootof(l**4 - 15*l**2 + 29, 2)**2 - 8)*exp(t*rootof(l**4 - 15*l**2 + 29, 2)) + \
C4*(rootof(l**4 - 15*l**2 + 29, 3)**2 - 8)*exp(t*rootof(l**4 - 15*l**2 + 29, 3)) + S(183)/29)]
assert dsolve(eq2) == sol2
eq3 = (Eq(diff(x(t),t,t) - 9*diff(y(t),t) + 7*x(t),0), Eq(diff(y(t),t,t) + 9*diff(x(t),t) + 7*y(t),0))
sol3 = [Eq(x(t), C1*cos(t*(S(9)/2 + sqrt(109)/2)) + C2*sin(t*(S(9)/2 + sqrt(109)/2)) + C3*cos(t*(-sqrt(109)/2 + S(9)/2)) + \
C4*sin(t*(-sqrt(109)/2 + S(9)/2))), Eq(y(t), -C1*sin(t*(S(9)/2 + sqrt(109)/2)) + C2*cos(t*(S(9)/2 + sqrt(109)/2)) - \
C3*sin(t*(-sqrt(109)/2 + S(9)/2)) + C4*cos(t*(-sqrt(109)/2 + S(9)/2)))]
assert dsolve(eq3) == sol3
eq4 = (Eq(diff(x(t),t,t), 9*t*diff(y(t),t)-9*y(t)), Eq(diff(y(t),t,t),7*t*diff(x(t),t)-7*x(t)))
sol4 = [Eq(x(t), C3*t + t*Integral((9*C1*exp(3*sqrt(7)*t**2/2) + 9*C2*exp(-3*sqrt(7)*t**2/2))/t**2, t)), \
Eq(y(t), C4*t + t*Integral((3*sqrt(7)*C1*exp(3*sqrt(7)*t**2/2) - 3*sqrt(7)*C2*exp(-3*sqrt(7)*t**2/2))/t**2, t))]
assert dsolve(eq4) == sol4
eq5 = (Eq(diff(x(t),t,t), (log(t)+t**2)*diff(x(t),t)+(log(t)+t**2)*3*diff(y(t),t)), Eq(diff(y(t),t,t), \
(log(t)+t**2)*2*diff(x(t),t)+(log(t)+t**2)*9*diff(y(t),t)))
sol5 = [Eq(x(t), -sqrt(22)*(C1*Integral(exp((-sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) + C2 - \
C3*Integral(exp((sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) - C4 - \
(sqrt(22) + 5)*(C1*Integral(exp((-sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) + C2) + \
(-sqrt(22) + 5)*(C3*Integral(exp((sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) + C4))/88), \
Eq(y(t), -sqrt(22)*(C1*Integral(exp((-sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) + \
C2 - C3*Integral(exp((sqrt(22) + 5)*Integral(t**2 + log(t), t)), t) - C4)/44)]
assert dsolve(eq5) == sol5
eq6 = (Eq(diff(x(t),t,t), log(t)*t*diff(y(t),t) - log(t)*y(t)), Eq(diff(y(t),t,t), log(t)*t*diff(x(t),t) - log(t)*x(t)))
sol6 = [Eq(x(t), C3*t + t*Integral((C1*exp(Integral(t*log(t), t)) + \
C2*exp(-Integral(t*log(t), t)))/t**2, t)), Eq(y(t), C4*t + t*Integral((C1*exp(Integral(t*log(t), t)) - \
C2*exp(-Integral(t*log(t), t)))/t**2, t))]
assert dsolve(eq6) == sol6
eq7 = (Eq(diff(x(t),t,t), log(t)*(t*diff(x(t),t) - x(t)) + exp(t)*(t*diff(y(t),t) - y(t))), \
Eq(diff(y(t),t,t), (t**2)*(t*diff(x(t),t) - x(t)) + (t)*(t*diff(y(t),t) - y(t))))
sol7 = [Eq(x(t), C3*t + t*Integral((C1*x0(t) + C2*x0(t)*Integral(t*exp(t)*exp(Integral(t**2, t))*\
exp(Integral(t*log(t), t))/x0(t)**2, t))/t**2, t)), Eq(y(t), C4*t + t*Integral((C1*y0(t) + \
C2*(y0(t)*Integral(t*exp(t)*exp(Integral(t**2, t))*exp(Integral(t*log(t), t))/x0(t)**2, t) + \
exp(Integral(t**2, t))*exp(Integral(t*log(t), t))/x0(t)))/t**2, t))]
assert dsolve(eq7) == sol7
eq8 = (Eq(diff(x(t),t,t), t*(4*x(t) + 9*y(t))), Eq(diff(y(t),t,t), t*(12*x(t) - 6*y(t))))
sol8 = ("[Eq(x(t), -sqrt(133)*((-sqrt(133) - 1)*(C2*(133*t**8/24 - t**3/6 + sqrt(133)*t**3/2 + 1) + "
"C1*t*(sqrt(133)*t**4/6 - t**3/12 + 1) + O(t**6)) - (-1 + sqrt(133))*(C2*(-sqrt(133)*t**3/6 - t**3/6 + 1) + "
"C1*t*(-sqrt(133)*t**3/12 - t**3/12 + 1) + O(t**6)) - 4*C2*(133*t**8/24 - t**3/6 + sqrt(133)*t**3/2 + 1) + "
"4*C2*(-sqrt(133)*t**3/6 - t**3/6 + 1) - 4*C1*t*(sqrt(133)*t**4/6 - t**3/12 + 1) + "
"4*C1*t*(-sqrt(133)*t**3/12 - t**3/12 + 1) + O(t**6))/3192), Eq(y(t), -sqrt(133)*(-C2*(133*t**8/24 - t**3/6 + "
"sqrt(133)*t**3/2 + 1) + C2*(-sqrt(133)*t**3/6 - t**3/6 + 1) - C1*t*(sqrt(133)*t**4/6 - t**3/12 + 1) + "
"C1*t*(-sqrt(133)*t**3/12 - t**3/12 + 1) + O(t**6))/266)]")
assert str(dsolve(eq8)) == sol8
eq9 = (Eq(diff(x(t),t,t), t*(4*diff(x(t),t) + 9*diff(y(t),t))), Eq(diff(y(t),t,t), t*(12*diff(x(t),t) - 6*diff(y(t),t))))
sol9 = [Eq(x(t), -sqrt(133)*(4*C1*Integral(exp((-sqrt(133) - 1)*Integral(t, t)), t) + 4*C2 - \
4*C3*Integral(exp((-1 + sqrt(133))*Integral(t, t)), t) - 4*C4 - (-1 + sqrt(133))*(C1*Integral(exp((-sqrt(133) - \
1)*Integral(t, t)), t) + C2) + (-sqrt(133) - 1)*(C3*Integral(exp((-1 + sqrt(133))*Integral(t, t)), t) + \
C4))/3192), Eq(y(t), -sqrt(133)*(C1*Integral(exp((-sqrt(133) - 1)*Integral(t, t)), t) + C2 - \
C3*Integral(exp((-1 + sqrt(133))*Integral(t, t)), t) - C4)/266)]
assert dsolve(eq9) == sol9
eq10 = (t**2*diff(x(t),t,t) + 3*t*diff(x(t),t) + 4*t*diff(y(t),t) + 12*x(t) + 9*y(t), \
t**2*diff(y(t),t,t) + 2*t*diff(x(t),t) - 5*t*diff(y(t),t) + 15*x(t) + 8*y(t))
sol10 = [Eq(x(t), -C1*(-2*sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 13 + 2*sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + \
346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))))*exp((-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 + sqrt(-284/sqrt(-346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)))/2)*log(t)) - \
C2*(-2*sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
13 - 2*sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))))*exp((-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 - sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)*log(t)) - C3*t**(1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2 + sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)*(2*sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 13 + 2*sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))) - C4*t**(-sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2 + 1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))/2)*(-2*sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))) + 2*sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 13)), Eq(y(t), C1*(-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 14 + (-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 + sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)**2 + sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))))*exp((-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 + sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)*log(t)) + C2*(-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 14 - sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))) + (-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 - sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)**2)*exp((-sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))/2 + 1 - sqrt(-284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) - 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)*log(t)) + C3*t**(1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + \
2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2 + sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)*(sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))) + 14 + (1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3))/2 + sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + 346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)))/2)**2) + C4*t**(-sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + \
346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)))/2 + 1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2)*(-sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + \
8 + 346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))) + (-sqrt(-2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3) + 8 + \
346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 284/sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)))/2 + 1 + sqrt(-346/(3*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + \
4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3))/2)**2 + sqrt(-346/(3*(S(4333)/4 + \
5*sqrt(70771857)/36)**(S(1)/3)) + 4 + 2*(S(4333)/4 + 5*sqrt(70771857)/36)**(S(1)/3)) + 14))]
assert dsolve(eq10) == sol10


def test_linear_3eq_order1():
x, y, z = symbols('x, y, z', cls=Function)
t = Symbol('t')
eq1 = (Eq(diff(x(t),t), 21*x(t)), Eq(diff(y(t),t), 17*x(t)+3*y(t)), Eq(diff(z(t),t), 5*x(t)+7*y(t)+9*z(t)))
sol1 = [Eq(x(t), C1*exp(21*t)), Eq(y(t), 17*C1*exp(21*t)/18 + C2*exp(3*t)), \
Eq(z(t), 209*C1*exp(21*t)/216 - 7*C2*exp(3*t)/6 + C3*exp(9*t))]
assert checksysodesol(eq1, sol1) == (True, [0, 0, 0])
eq2 = (Eq(diff(x(t),t),3*y(t)-11*z(t)),Eq(diff(y(t),t),7*z(t)-3*x(t)),Eq(diff(z(t),t),11*x(t)-7*y(t)))
sol2 = [Eq(x(t), 7*C0 + sqrt(179)*C1*cos(sqrt(179)*t) + (77*C1/3 + 130*C2/3)*sin(sqrt(179)*t)), \
Eq(y(t), 11*C0 + sqrt(179)*C2*cos(sqrt(179)*t) + (-58*C1/3 - 77*C2/3)*sin(sqrt(179)*t)), \
Eq(z(t), 3*C0 + sqrt(179)*(-7*C1/3 - 11*C2/3)*cos(sqrt(179)*t) + (11*C1 - 7*C2)*sin(sqrt(179)*t))]
assert checksysodesol(eq2, sol2) == (True, [0, 0, 0])
eq3 = (Eq(3*diff(x(t),t),4*5*(y(t)-z(t))),Eq(4*diff(y(t),t),3*5*(z(t)-x(t))),Eq(5*diff(z(t),t),3*4*(x(t)-y(t))))
sol3 = [Eq(x(t), C0 + 5*sqrt(2)*C1*cos(5*sqrt(2)*t) + (12*C1/5 + 164*C2/15)*sin(5*sqrt(2)*t)), \
Eq(y(t), C0 + 5*sqrt(2)*C2*cos(5*sqrt(2)*t) + (-51*C1/10 - 12*C2/5)*sin(5*sqrt(2)*t)), \
Eq(z(t), C0 + 5*sqrt(2)*(-9*C1/25 - 16*C2/25)*cos(5*sqrt(2)*t) + (12*C1/5 - 12*C2/5)*sin(5*sqrt(2)*t))]
assert checksysodesol(eq3, sol3) == (True, [0, 0, 0])
f = t**3 + log(t)
g = t**2 + sin(t)
eq4 = (Eq(diff(x(t),t),(4*f+g)*x(t)-f*y(t)-2*f*z(t)), Eq(diff(y(t),t),2*f*x(t)+(f+g)*y(t)-2*f*z(t)), Eq(diff(z(t),t),5*f*x(t)+f*y(t)+(-3*f+g)*z(t)))
sol4 = [Eq(x(t), (C1*exp(-2*Integral(t**3 + log(t), t)) + C2*(sqrt(3)*sin(sqrt(3)*Integral(t**3 + log(t), t))/6 \
+ cos(sqrt(3)*Integral(t**3 + log(t), t))/2) + C3*(sin(sqrt(3)*Integral(t**3 + log(t), t))/2 - \
sqrt(3)*cos(sqrt(3)*Integral(t**3 + log(t), t))/6))*exp(Integral(-t**2 - sin(t), t))), Eq(y(t), \
(C2*(sqrt(3)*sin(sqrt(3)*Integral(t**3 + log(t), t))/6 + cos(sqrt(3)*Integral(t**3 + log(t), t))/2) + \
C3*(sin(sqrt(3)*Integral(t**3 + log(t), t))/2 - sqrt(3)*cos(sqrt(3)*Integral(t**3 + log(t), t))/6))*\
exp(Integral(-t**2 - sin(t), t))), Eq(z(t), (C1*exp(-2*Integral(t**3 + log(t), t)) + C2*cos(sqrt(3)*\
Integral(t**3 + log(t), t)) + C3*sin(sqrt(3)*Integral(t**3 + log(t), t)))*exp(Integral(-t**2 - sin(t), t)))]
assert dsolve(eq4) == sol4
eq5 = (Eq(diff(x(t),t),4*x(t) - z(t)),Eq(diff(y(t),t),2*x(t)+2*y(t)-z(t)),Eq(diff(z(t),t),3*x(t)+y(t)))
sol5 = [Eq(x(t), C1*exp(2*t) + C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t)/2 + C3*t*exp(2*t) + C3*exp(2*t)), \
Eq(y(t), C1*exp(2*t) + C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t)/2 + C3*t*exp(2*t)), \
Eq(z(t), 2*C1*exp(2*t) + 2*C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t) + C3*t*exp(2*t) + C3*exp(2*t))]
assert checksysodesol(eq5, sol5) == (True, [0, 0, 0])
eq6 = (Eq(diff(x(t),t),4*x(t) - y(t) - 2*z(t)),Eq(diff(y(t),t),2*x(t) + y(t)- 2*z(t)),Eq(diff(z(t),t),5*x(t)-3*z(t)))
sol6 = [Eq(x(t), C1*exp(2*t) + C2*(-sin(t)/5 + 3*cos(t)/5) + C3*(3*sin(t)/5 + cos(t)/5)),
Eq(y(t), C2*(-sin(t)/5 + 3*cos(t)/5) + C3*(3*sin(t)/5 + cos(t)/5)),
Eq(z(t), C1*exp(2*t) + C2*cos(t) + C3*sin(t))]
    assert checksysodesol(eq6, sol6) == (True, [0, 0, 0])


def test_linear_3eq_order1_nonhomog():
e = [Eq(diff(f(x), x), -9*f(x) - 4*g(x)),
Eq(diff(g(x), x), -4*g(x)),
Eq(diff(h(x), x), h(x) + exp(x))]
raises(NotImplementedError, lambda: dsolve(e))


@XFAIL
def test_linear_3eq_order1_diagonal():
    # The solver assumes certain coefficients are nonzero and breaks when
    # that assumption does not hold.
e = [Eq(diff(f(x), x), f(x)),
Eq(diff(g(x), x), g(x)),
Eq(diff(h(x), x), h(x))]
s1 = [Eq(f(x), C1*exp(x)), Eq(g(x), C2*exp(x)), Eq(h(x), C3*exp(x))]
s = dsolve(e)
assert s == s1


def test_nonlinear_2eq_order1():
x, y, z = symbols('x, y, z', cls=Function)
t = Symbol('t')
eq1 = (Eq(diff(x(t),t),x(t)*y(t)**3), Eq(diff(y(t),t),y(t)**5))
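    # y' = y**5 separates to y**4 = -1/(4*(C2 + t)), so dsolve returns four
    # branches: the +1, -1, +I and -I multiples of the principal fourth root.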
sol1 = [
Eq(x(t), C1*exp((-1/(4*C2 + 4*t))**(-S(1)/4))),
Eq(y(t), -(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), C1*exp(-1/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), (-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), C1*exp(-I/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), -I*(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), C1*exp(I/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), I*(-1/(4*C2 + 4*t))**(S(1)/4))]
assert dsolve(eq1) == sol1
eq2 = (Eq(diff(x(t),t), exp(3*x(t))*y(t)**3),Eq(diff(y(t),t), y(t)**5))
sol2 = [
Eq(x(t), -log(C1 - 3/(-1/(4*C2 + 4*t))**(S(1)/4))/3),
Eq(y(t), -(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), -log(C1 + 3/(-1/(4*C2 + 4*t))**(S(1)/4))/3),
Eq(y(t), (-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), -log(C1 + 3*I/(-1/(4*C2 + 4*t))**(S(1)/4))/3),
Eq(y(t), -I*(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), -log(C1 - 3*I/(-1/(4*C2 + 4*t))**(S(1)/4))/3),
Eq(y(t), I*(-1/(4*C2 + 4*t))**(S(1)/4))]
assert dsolve(eq2) == sol2
eq3 = (Eq(diff(x(t),t), y(t)*x(t)), Eq(diff(y(t),t), x(t)**3))
tt = S(2)/3
sol3 = [
Eq(x(t), 6**tt/(6*(-sinh(sqrt(C1)*(C2 + t)/2)/sqrt(C1))**tt)),
Eq(y(t), sqrt(C1 + C1/sinh(sqrt(C1)*(C2 + t)/2)**2)/3)]
assert dsolve(eq3) == sol3
eq4 = (Eq(diff(x(t),t),x(t)*y(t)*sin(t)**2), Eq(diff(y(t),t),y(t)**2*sin(t)**2))
sol4 = set([Eq(x(t), -2*exp(C1)/(C2*exp(C1) + t - sin(2*t)/2)), Eq(y(t), -2/(C1 + t - sin(2*t)/2))])
assert dsolve(eq4) == sol4
eq5 = (Eq(x(t),t*diff(x(t),t)+diff(x(t),t)*diff(y(t),t)), Eq(y(t),t*diff(y(t),t)+diff(y(t),t)**2))
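    # A Clairaut-type system: with x' = C1 and y' = C2 constant, both
    # equations x = t*x' + x'*y' and y = t*y' + y'**2 hold identically.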
sol5 = set([Eq(x(t), C1*C2 + C1*t), Eq(y(t), C2**2 + C2*t)])
assert dsolve(eq5) == sol5
eq6 = (Eq(diff(x(t),t),x(t)**2*y(t)**3), Eq(diff(y(t),t),y(t)**5))
sol6 = [
Eq(x(t), 1/(C1 - 1/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), -(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), 1/(C1 + (-1/(4*C2 + 4*t))**(-S(1)/4))),
Eq(y(t), (-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), 1/(C1 + I/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), -I*(-1/(4*C2 + 4*t))**(S(1)/4)),
Eq(x(t), 1/(C1 - I/(-1/(4*C2 + 4*t))**(S(1)/4))),
Eq(y(t), I*(-1/(4*C2 + 4*t))**(S(1)/4))]
assert dsolve(eq6) == sol6


def test_checksysodesol():
x, y, z = symbols('x, y, z', cls=Function)
t = Symbol('t')
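    # checksysodesol substitutes each candidate solution into the system and
    # returns (True, [0, ..., 0]) when every residual simplifies to zero.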
eq = (Eq(diff(x(t),t), 9*y(t)), Eq(diff(y(t),t), 12*x(t)))
sol = [Eq(x(t), 9*C1*exp(-6*sqrt(3)*t) + 9*C2*exp(6*sqrt(3)*t)), \
Eq(y(t), -6*sqrt(3)*C1*exp(-6*sqrt(3)*t) + 6*sqrt(3)*C2*exp(6*sqrt(3)*t))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), 2*x(t) + 4*y(t)), Eq(diff(y(t),t), 12*x(t) + 41*y(t)))
sol = [Eq(x(t), 4*C1*exp(t*(-sqrt(1713)/2 + S(43)/2)) + 4*C2*exp(t*(sqrt(1713)/2 + \
S(43)/2))), Eq(y(t), C1*(-sqrt(1713)/2 + S(39)/2)*exp(t*(-sqrt(1713)/2 + \
S(43)/2)) + C2*(S(39)/2 + sqrt(1713)/2)*exp(t*(sqrt(1713)/2 + S(43)/2)))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), x(t) + y(t)), Eq(diff(y(t),t), -2*x(t) + 2*y(t)))
sol = [Eq(x(t), (C1*sin(sqrt(7)*t/2) + C2*cos(sqrt(7)*t/2))*exp(3*t/2)), \
Eq(y(t), ((C1/2 - sqrt(7)*C2/2)*sin(sqrt(7)*t/2) + (sqrt(7)*C1/2 + \
C2/2)*cos(sqrt(7)*t/2))*exp(3*t/2))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), x(t) + y(t) + 9), Eq(diff(y(t),t), 2*x(t) + 5*y(t) + 23))
sol = [Eq(x(t), C1*exp(t*(-sqrt(6) + 3)) + C2*exp(t*(sqrt(6) + 3)) - \
S(22)/3), Eq(y(t), C1*(-sqrt(6) + 2)*exp(t*(-sqrt(6) + 3)) + C2*(2 + \
sqrt(6))*exp(t*(sqrt(6) + 3)) - S(5)/3)]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), x(t) + y(t) + 81), Eq(diff(y(t),t), -2*x(t) + y(t) + 23))
sol = [Eq(x(t), (C1*sin(sqrt(2)*t) + C2*cos(sqrt(2)*t))*exp(t) - S(58)/3), \
Eq(y(t), (sqrt(2)*C1*cos(sqrt(2)*t) - sqrt(2)*C2*sin(sqrt(2)*t))*exp(t) - S(185)/3)]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), 5*t*x(t) + 2*y(t)), Eq(diff(y(t),t), 2*x(t) + 5*t*y(t)))
sol = [Eq(x(t), (C1*exp((Integral(2, t).doit())) + C2*exp(-(Integral(2, t)).doit()))*\
exp((Integral(5*t, t)).doit())), Eq(y(t), (C1*exp((Integral(2, t)).doit()) - \
C2*exp(-(Integral(2, t)).doit()))*exp((Integral(5*t, t)).doit()))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))
sol = [Eq(x(t), (C1*cos((Integral(t**2, t)).doit()) + C2*sin((Integral(t**2, t)).doit()))*\
exp((Integral(5*t, t)).doit())), Eq(y(t), (-C1*sin((Integral(t**2, t)).doit()) + \
C2*cos((Integral(t**2, t)).doit()))*exp((Integral(5*t, t)).doit()))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + (5*t+9*t**2)*y(t)))
sol = [Eq(x(t), (C1*exp((-sqrt(77)/2 + S(9)/2)*(Integral(t**2, t)).doit()) + \
C2*exp((sqrt(77)/2 + S(9)/2)*(Integral(t**2, t)).doit()))*exp((Integral(5*t, t)).doit())), \
Eq(y(t), (C1*(-sqrt(77)/2 + S(9)/2)*exp((-sqrt(77)/2 + S(9)/2)*(Integral(t**2, t)).doit()) + \
C2*(sqrt(77)/2 + S(9)/2)*exp((sqrt(77)/2 + S(9)/2)*(Integral(t**2, t)).doit()))*exp((Integral(5*t, t)).doit()))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t,t), 5*x(t) + 43*y(t)), Eq(diff(y(t),t,t), x(t) + 9*y(t)))
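    # The roots below solve the characteristic equation
    # (m**2 - 5)*(m**2 - 9) = 43, i.e. m**4 - 14*m**2 + 2 = 0,
    # so m**2 = 7 +/- sqrt(47).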
root0 = -sqrt(-sqrt(47) + 7)
root1 = sqrt(-sqrt(47) + 7)
root2 = -sqrt(sqrt(47) + 7)
root3 = sqrt(sqrt(47) + 7)
sol = [Eq(x(t), 43*C1*exp(t*root0) + 43*C2*exp(t*root1) + 43*C3*exp(t*root2) + 43*C4*exp(t*root3)), \
Eq(y(t), C1*(root0**2 - 5)*exp(t*root0) + C2*(root1**2 - 5)*exp(t*root1) + \
C3*(root2**2 - 5)*exp(t*root2) + C4*(root3**2 - 5)*exp(t*root3))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t,t), 8*x(t)+3*y(t)+31), Eq(diff(y(t),t,t), 9*x(t)+7*y(t)+12))
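    # Characteristic equation: (m**2 - 8)*(m**2 - 7) = 27, i.e.
    # m**4 - 15*m**2 + 29 = 0, so m**2 = (15 +/- sqrt(109))/2.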
root0 = -sqrt(-sqrt(109)/2 + S(15)/2)
root1 = sqrt(-sqrt(109)/2 + S(15)/2)
root2 = -sqrt(sqrt(109)/2 + S(15)/2)
root3 = sqrt(sqrt(109)/2 + S(15)/2)
sol = [Eq(x(t), 3*C1*exp(t*root0) + 3*C2*exp(t*root1) + 3*C3*exp(t*root2) + 3*C4*exp(t*root3) - S(181)/29), \
Eq(y(t), C1*(root0**2 - 8)*exp(t*root0) + C2*(root1**2 - 8)*exp(t*root1) + \
C3*(root2**2 - 8)*exp(t*root2) + C4*(root3**2 - 8)*exp(t*root3) + S(183)/29)]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t,t) - 9*diff(y(t),t) + 7*x(t),0), Eq(diff(y(t),t,t) + 9*diff(x(t),t) + 7*y(t),0))
sol = [Eq(x(t), C1*cos(t*(S(9)/2 + sqrt(109)/2)) + C2*sin(t*(S(9)/2 + sqrt(109)/2)) + \
C3*cos(t*(-sqrt(109)/2 + S(9)/2)) + C4*sin(t*(-sqrt(109)/2 + S(9)/2))), Eq(y(t), -C1*sin(t*(S(9)/2 + sqrt(109)/2)) \
+ C2*cos(t*(S(9)/2 + sqrt(109)/2)) - C3*sin(t*(-sqrt(109)/2 + S(9)/2)) + C4*cos(t*(-sqrt(109)/2 + S(9)/2)))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t,t), 9*t*diff(y(t),t)-9*y(t)), Eq(diff(y(t),t,t),7*t*diff(x(t),t)-7*x(t)))
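    # I1 and I2 are two independent integrals (expressed through erfi and erf)
    # from which the general solution below is assembled.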
I1 = sqrt(6)*7**(S(1)/4)*sqrt(pi)*erfi(sqrt(6)*7**(S(1)/4)*t/2)/2 - exp(3*sqrt(7)*t**2/2)/t
I2 = -sqrt(6)*7**(S(1)/4)*sqrt(pi)*erf(sqrt(6)*7**(S(1)/4)*t/2)/2 - exp(-3*sqrt(7)*t**2/2)/t
sol = [Eq(x(t), C3*t + t*(9*C1*I1 + 9*C2*I2)), Eq(y(t), C4*t + t*(3*sqrt(7)*C1*I1 - 3*sqrt(7)*C2*I2))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), 21*x(t)), Eq(diff(y(t),t), 17*x(t)+3*y(t)), Eq(diff(z(t),t), 5*x(t)+7*y(t)+9*z(t)))
sol = [Eq(x(t), C1*exp(21*t)), Eq(y(t), 17*C1*exp(21*t)/18 + C2*exp(3*t)), \
Eq(z(t), 209*C1*exp(21*t)/216 - 7*C2*exp(3*t)/6 + C3*exp(9*t))]
assert checksysodesol(eq, sol) == (True, [0, 0, 0])
eq = (Eq(diff(x(t),t),3*y(t)-11*z(t)),Eq(diff(y(t),t),7*z(t)-3*x(t)),Eq(diff(z(t),t),11*x(t)-7*y(t)))
sol = [Eq(x(t), 7*C0 + sqrt(179)*C1*cos(sqrt(179)*t) + (77*C1/3 + 130*C2/3)*sin(sqrt(179)*t)), \
Eq(y(t), 11*C0 + sqrt(179)*C2*cos(sqrt(179)*t) + (-58*C1/3 - 77*C2/3)*sin(sqrt(179)*t)), \
Eq(z(t), 3*C0 + sqrt(179)*(-7*C1/3 - 11*C2/3)*cos(sqrt(179)*t) + (11*C1 - 7*C2)*sin(sqrt(179)*t))]
assert checksysodesol(eq, sol) == (True, [0, 0, 0])
eq = (Eq(3*diff(x(t),t),4*5*(y(t)-z(t))),Eq(4*diff(y(t),t),3*5*(z(t)-x(t))),Eq(5*diff(z(t),t),3*4*(x(t)-y(t))))
sol = [Eq(x(t), C0 + 5*sqrt(2)*C1*cos(5*sqrt(2)*t) + (12*C1/5 + 164*C2/15)*sin(5*sqrt(2)*t)), \
Eq(y(t), C0 + 5*sqrt(2)*C2*cos(5*sqrt(2)*t) + (-51*C1/10 - 12*C2/5)*sin(5*sqrt(2)*t)), \
Eq(z(t), C0 + 5*sqrt(2)*(-9*C1/25 - 16*C2/25)*cos(5*sqrt(2)*t) + (12*C1/5 - 12*C2/5)*sin(5*sqrt(2)*t))]
assert checksysodesol(eq, sol) == (True, [0, 0, 0])
eq = (Eq(diff(x(t),t),4*x(t) - z(t)),Eq(diff(y(t),t),2*x(t)+2*y(t)-z(t)),Eq(diff(z(t),t),3*x(t)+y(t)))
sol = [Eq(x(t), C1*exp(2*t) + C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t)/2 + C3*t*exp(2*t) + C3*exp(2*t)), \
Eq(y(t), C1*exp(2*t) + C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t)/2 + C3*t*exp(2*t)), \
Eq(z(t), 2*C1*exp(2*t) + 2*C2*t*exp(2*t) + C2*exp(2*t) + C3*t**2*exp(2*t) + C3*t*exp(2*t) + C3*exp(2*t))]
assert checksysodesol(eq, sol) == (True, [0, 0, 0])
eq = (Eq(diff(x(t),t),4*x(t) - y(t) - 2*z(t)),Eq(diff(y(t),t),2*x(t) + y(t)- 2*z(t)),Eq(diff(z(t),t),5*x(t)-3*z(t)))
sol = [Eq(x(t), C1*exp(2*t) + C2*(-sin(t) + 3*cos(t)) + C3*(3*sin(t) + cos(t))), \
Eq(y(t), C2*(-sin(t) + 3*cos(t)) + C3*(3*sin(t) + cos(t))), Eq(z(t), C1*exp(2*t) + 5*C2*cos(t) + 5*C3*sin(t))]
assert checksysodesol(eq, sol) == (True, [0, 0, 0])
eq = (Eq(diff(x(t),t),x(t)*y(t)**3), Eq(diff(y(t),t),y(t)**5))
sol = [Eq(x(t), C1*exp((-1/(4*C2 + 4*t))**(-S(1)/4))), Eq(y(t), -(-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), C1*exp(-1/(-1/(4*C2 + 4*t))**(S(1)/4))), Eq(y(t), (-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), C1*exp(-I/(-1/(4*C2 + 4*t))**(S(1)/4))), Eq(y(t), -I*(-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), C1*exp(I/(-1/(4*C2 + 4*t))**(S(1)/4))), Eq(y(t), I*(-1/(4*C2 + 4*t))**(S(1)/4))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(diff(x(t),t), exp(3*x(t))*y(t)**3),Eq(diff(y(t),t), y(t)**5))
sol = [Eq(x(t), -log(C1 - 3/(-1/(4*C2 + 4*t))**(S(1)/4))/3), Eq(y(t), -(-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), -log(C1 + 3/(-1/(4*C2 + 4*t))**(S(1)/4))/3), Eq(y(t), (-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), -log(C1 + 3*I/(-1/(4*C2 + 4*t))**(S(1)/4))/3), Eq(y(t), -I*(-1/(4*C2 + 4*t))**(S(1)/4)), \
Eq(x(t), -log(C1 - 3*I/(-1/(4*C2 + 4*t))**(S(1)/4))/3), Eq(y(t), I*(-1/(4*C2 + 4*t))**(S(1)/4))]
assert checksysodesol(eq, sol) == (True, [0, 0])
eq = (Eq(x(t),t*diff(x(t),t)+diff(x(t),t)*diff(y(t),t)), Eq(y(t),t*diff(y(t),t)+diff(y(t),t)**2))
sol = set([Eq(x(t), C1*C2 + C1*t), Eq(y(t), C2**2 + C2*t)])
assert checksysodesol(eq, sol) == (True, [0, 0])


@slow
def test_nonlinear_3eq_order1():
x, y, z = symbols('x, y, z', cls=Function)
t, u = symbols('t u')
eq1 = (4*diff(x(t),t) + 2*y(t)*z(t), 3*diff(y(t),t) - z(t)*x(t), 5*diff(z(t),t) - x(t)*y(t))
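    # This system has the structure of Euler's rigid-body equations; the
    # solutions are implicit quadratures parametrized by conserved quantities.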
sol1 = [Eq(4*Integral(1/(sqrt(-4*u**2 - 3*C1 + C2)*sqrt(-4*u**2 + 5*C1 - C2)), (u, x(t))),
C3 - sqrt(15)*t/15), Eq(3*Integral(1/(sqrt(-6*u**2 - C1 + 5*C2)*sqrt(3*u**2 + C1 - 4*C2)),
(u, y(t))), C3 + sqrt(5)*t/10), Eq(5*Integral(1/(sqrt(-10*u**2 - 3*C1 + C2)*
sqrt(5*u**2 + 4*C1 - C2)), (u, z(t))), C3 + sqrt(3)*t/6)]
    assert all(i.dummy_eq(j) for i, j in zip(dsolve(eq1), sol1))
eq2 = (4*diff(x(t),t) + 2*y(t)*z(t)*sin(t), 3*diff(y(t),t) - z(t)*x(t)*sin(t), 5*diff(z(t),t) - x(t)*y(t)*sin(t))
sol2 = [Eq(3*Integral(1/(sqrt(-6*u**2 - C1 + 5*C2)*sqrt(3*u**2 + C1 - 4*C2)), (u, x(t))), C3 +
sqrt(5)*cos(t)/10), Eq(4*Integral(1/(sqrt(-4*u**2 - 3*C1 + C2)*sqrt(-4*u**2 + 5*C1 - C2)),
(u, y(t))), C3 - sqrt(15)*cos(t)/15), Eq(5*Integral(1/(sqrt(-10*u**2 - 3*C1 + C2)*
sqrt(5*u**2 + 4*C1 - C2)), (u, z(t))), C3 + sqrt(3)*cos(t)/6)]
    assert all(i.dummy_eq(j) for i, j in zip(dsolve(eq2), sol2))


def test_checkodesol():
from sympy import Ei
# For the most part, checkodesol is well tested in the tests below.
# These tests only handle cases not checked below.
raises(ValueError, lambda: checkodesol(f(x, y).diff(x), Eq(f(x, y), x)))
raises(ValueError, lambda: checkodesol(f(x).diff(x), Eq(f(x, y),
x), f(x, y)))
assert checkodesol(f(x).diff(x), Eq(f(x, y), x)) == \
(False, -f(x).diff(x) + f(x, y).diff(x) - 1)
assert checkodesol(f(x).diff(x), Eq(f(x), x)) is not True
assert checkodesol(f(x).diff(x), Eq(f(x), x)) == (False, 1)
sol1 = Eq(f(x)**5 + 11*f(x) - 2*f(x) + x, 0)
assert checkodesol(diff(sol1.lhs, x), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x)*exp(f(x)), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x, 2), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x, 2)*exp(f(x)), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x, 3), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x, 3)*exp(f(x)), sol1) == (True, 0)
assert checkodesol(diff(sol1.lhs, x, 3), Eq(f(x), x*log(x))) == \
(False, 60*x**4*((log(x) + 1)**2 + log(x))*(
log(x) + 1)*log(x)**2 - 5*x**4*log(x)**4 - 9)
    assert checkodesol(diff(exp(f(x)) + x, x)*x, Eq(exp(f(x)) + x, 0)) == \
        (True, 0)
    assert checkodesol(diff(exp(f(x)) + x, x)*x, Eq(exp(f(x)) + x, 0),
        solve_for_func=False) == (True, 0)
assert checkodesol(f(x).diff(x, 2), [Eq(f(x), C1 + C2*x),
Eq(f(x), C2 + C1*x), Eq(f(x), C1*x + C2*x**2)]) == \
[(True, 0), (True, 0), (False, C2)]
assert checkodesol(f(x).diff(x, 2), set([Eq(f(x), C1 + C2*x),
Eq(f(x), C2 + C1*x), Eq(f(x), C1*x + C2*x**2)])) == \
set([(True, 0), (True, 0), (False, C2)])
assert checkodesol(f(x).diff(x) - 1/f(x)/2, Eq(f(x)**2, x)) == \
[(True, 0), (True, 0)]
assert checkodesol(f(x).diff(x) - f(x), Eq(C1*exp(x), f(x))) == (True, 0)
    # Based on test_1st_homogeneous_coeff_ode2_eq3sol.  Make sure that
    # checkodesol tries back-substituting f(x) when it can.
eq3 = x*exp(f(x)/x) + f(x) - x*f(x).diff(x)
sol3 = Eq(f(x), log(log(C1/x)**(-x)))
assert not checkodesol(eq3, sol3)[1].has(f(x))
# This case was failing intermittently depending on hash-seed:
eqn = Eq(Derivative(x*Derivative(f(x), x), x)/x, exp(x))
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]


@slow
def test_dsolve_options():
eq = x*f(x).diff(x) + f(x)
a = dsolve(eq, hint='all')
b = dsolve(eq, hint='all', simplify=False)
c = dsolve(eq, hint='all_Integral')
keys = ['1st_exact', '1st_exact_Integral', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral', '1st_linear',
'1st_linear_Integral', 'almost_linear', 'almost_linear_Integral',
'best', 'best_hint', 'default', 'lie_group',
'nth_linear_euler_eq_homogeneous', 'order',
'separable', 'separable_Integral']
Integral_keys = ['1st_exact_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral', '1st_linear_Integral',
'almost_linear_Integral', 'best', 'best_hint', 'default',
'nth_linear_euler_eq_homogeneous',
'order', 'separable_Integral']
assert sorted(a.keys()) == keys
assert a['order'] == ode_order(eq, f(x))
assert a['best'] == Eq(f(x), C1/x)
assert dsolve(eq, hint='best') == Eq(f(x), C1/x)
assert a['default'] == 'separable'
assert a['best_hint'] == 'separable'
assert not a['1st_exact'].has(Integral)
assert not a['separable'].has(Integral)
assert not a['1st_homogeneous_coeff_best'].has(Integral)
assert not a['1st_homogeneous_coeff_subs_dep_div_indep'].has(Integral)
assert not a['1st_homogeneous_coeff_subs_indep_div_dep'].has(Integral)
assert not a['1st_linear'].has(Integral)
assert a['1st_linear_Integral'].has(Integral)
assert a['1st_exact_Integral'].has(Integral)
assert a['1st_homogeneous_coeff_subs_dep_div_indep_Integral'].has(Integral)
assert a['1st_homogeneous_coeff_subs_indep_div_dep_Integral'].has(Integral)
assert a['separable_Integral'].has(Integral)
assert sorted(b.keys()) == keys
assert b['order'] == ode_order(eq, f(x))
assert b['best'] == Eq(f(x), C1/x)
assert dsolve(eq, hint='best', simplify=False) == Eq(f(x), C1/x)
assert b['default'] == 'separable'
assert b['best_hint'] == '1st_linear'
assert a['separable'] != b['separable']
assert a['1st_homogeneous_coeff_subs_dep_div_indep'] != \
b['1st_homogeneous_coeff_subs_dep_div_indep']
assert a['1st_homogeneous_coeff_subs_indep_div_dep'] != \
b['1st_homogeneous_coeff_subs_indep_div_dep']
assert not b['1st_exact'].has(Integral)
assert not b['separable'].has(Integral)
assert not b['1st_homogeneous_coeff_best'].has(Integral)
assert not b['1st_homogeneous_coeff_subs_dep_div_indep'].has(Integral)
assert not b['1st_homogeneous_coeff_subs_indep_div_dep'].has(Integral)
assert not b['1st_linear'].has(Integral)
assert b['1st_linear_Integral'].has(Integral)
assert b['1st_exact_Integral'].has(Integral)
assert b['1st_homogeneous_coeff_subs_dep_div_indep_Integral'].has(Integral)
assert b['1st_homogeneous_coeff_subs_indep_div_dep_Integral'].has(Integral)
assert b['separable_Integral'].has(Integral)
assert sorted(c.keys()) == Integral_keys
raises(ValueError, lambda: dsolve(eq, hint='notarealhint'))
raises(ValueError, lambda: dsolve(eq, hint='Liouville'))
assert dsolve(f(x).diff(x) - 1/f(x)**2, hint='all')['best'] == \
dsolve(f(x).diff(x) - 1/f(x)**2, hint='best')
assert dsolve(f(x) + f(x).diff(x) + sin(x).diff(x) + 1, f(x),
hint="1st_linear_Integral") == \
Eq(f(x), (C1 + Integral((-sin(x).diff(x) - 1)*
exp(Integral(1, x)), x))*exp(-Integral(1, x)))


def test_classify_ode():
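    # classify_ode returns a tuple of matching hints, ordered so that the
    # first is what dsolve uses by default; the '_Integral' variants leave
    # the integrals unevaluated.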
assert classify_ode(f(x).diff(x, 2), f(x)) == \
('nth_algebraic',
'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous',
'Liouville',
'2nd_power_series_ordinary',
'nth_algebraic_Integral',
'Liouville_Integral',
)
assert classify_ode(f(x), f(x)) == ()
assert classify_ode(Eq(f(x).diff(x), 0), f(x)) == (
'nth_algebraic',
'separable',
'1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous',
'nth_algebraic_Integral',
'separable_Integral',
'1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
assert classify_ode(f(x).diff(x)**2, f(x)) == (
'nth_algebraic',
'lie_group',
'nth_algebraic_Integral')
# issue 4749: f(x) should be cleared from highest derivative before classifying
a = classify_ode(Eq(f(x).diff(x) + f(x), x), f(x))
b = classify_ode(f(x).diff(x)*f(x) + f(x)*f(x) - x*f(x), f(x))
c = classify_ode(f(x).diff(x)/f(x) + f(x)/f(x) - x/f(x), f(x))
assert a == ('1st_linear',
'Bernoulli',
'almost_linear',
'1st_power_series', "lie_group",
'nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'1st_linear_Integral',
'Bernoulli_Integral',
'almost_linear_Integral',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
assert b == c != ()
assert classify_ode(
2*x*f(x)*f(x).diff(x) + (1 + x)*f(x)**2 - exp(x), f(x)
) == ('Bernoulli', 'almost_linear', 'lie_group',
'Bernoulli_Integral', 'almost_linear_Integral')
assert 'Riccati_special_minus2' in \
classify_ode(2*f(x).diff(x) + f(x)**2 - f(x)/x + 3*x**(-2), f(x))
raises(ValueError, lambda: classify_ode(x + f(x, y).diff(x).diff(
y), f(x, y)))
# issue 5176
k = Symbol('k')
assert classify_ode(f(x).diff(x)/(k*f(x) + k*x*f(x)) + 2*f(x)/(k*f(x) +
k*x*f(x)) + x*f(x).diff(x)/(k*f(x) + k*x*f(x)) + z, f(x)) == \
('separable', '1st_exact', '1st_power_series', 'lie_group',
'separable_Integral', '1st_exact_Integral')
# preprocessing
ans = ('nth_algebraic', 'separable', '1st_exact', '1st_linear', 'Bernoulli',
'1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_euler_eq_nonhomogeneous_variation_of_parameters',
'nth_algebraic_Integral',
'separable_Integral', '1st_exact_Integral',
'1st_linear_Integral',
'Bernoulli_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral',
'nth_linear_constant_coeff_variation_of_parameters_Integral',
'nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral')
# w/o f(x) given
assert classify_ode(diff(f(x) + x, x) + diff(f(x), x)) == ans
# w/ f(x) and prep=True
assert classify_ode(diff(f(x) + x, x) + diff(f(x), x), f(x),
prep=True) == ans
assert classify_ode(Eq(2*x**3*f(x).diff(x), 0), f(x)) == \
('nth_algebraic', 'separable', '1st_linear', '1st_power_series',
'lie_group', 'nth_linear_euler_eq_homogeneous',
'nth_algebraic_Integral', 'separable_Integral',
'1st_linear_Integral')
assert classify_ode(Eq(2*f(x)**3*f(x).diff(x), 0), f(x)) == \
('nth_algebraic', 'separable', '1st_power_series', 'lie_group',
'nth_algebraic_Integral', 'separable_Integral')
# test issue 13864
assert classify_ode(Eq(diff(f(x), x) - f(x)**x, 0), f(x)) == \
('1st_power_series', 'lie_group')
assert isinstance(classify_ode(Eq(f(x), 5), f(x), dict=True), dict)


def test_classify_ode_ics():
# Dummy
eq = f(x).diff(x, x) - f(x)
# Not f(0) or f'(0)
ics = {x: 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
############################
# f(0) type (AppliedUndef) #
############################
# Wrong function
ics = {g(0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Contains x
ics = {f(x): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Too many args
ics = {f(0, 0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# point contains f
# XXX: Should be NotImplementedError
ics = {f(0): f(1)}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Does not raise
ics = {f(0): 1}
classify_ode(eq, f(x), ics=ics)
#####################
# f'(0) type (Subs) #
#####################
# Wrong function
ics = {g(x).diff(x).subs(x, 0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Contains x
ics = {f(y).diff(y).subs(y, x): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Wrong variable
ics = {f(y).diff(y).subs(y, 0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Too many args
ics = {f(x, y).diff(x).subs(x, 0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Derivative wrt wrong vars
ics = {Derivative(f(x), x, y).subs(x, 0): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# point contains f
# XXX: Should be NotImplementedError
ics = {f(x).diff(x).subs(x, 0): f(0)}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Does not raise
ics = {f(x).diff(x).subs(x, 0): 1}
classify_ode(eq, f(x), ics=ics)
###########################
# f'(y) type (Derivative) #
###########################
# Wrong function
ics = {g(x).diff(x).subs(x, y): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Contains x
ics = {f(y).diff(y).subs(y, x): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Too many args
ics = {f(x, y).diff(x).subs(x, y): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Derivative wrt wrong vars
ics = {Derivative(f(x), x, z).subs(x, y): 1}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# point contains f
# XXX: Should be NotImplementedError
ics = {f(x).diff(x).subs(x, y): f(0)}
raises(ValueError, lambda: classify_ode(eq, f(x), ics=ics))
# Does not raise
ics = {f(x).diff(x).subs(x, y): 1}
classify_ode(eq, f(x), ics=ics)


def test_classify_sysode():
    # Here x is assumed to be x(t) and y to be y(t) for simplicity.
    # Similarly, diff(x(t), t) and diff(y(t), t) are abbreviated as x1 and y1
    # respectively.
    k, l, m, n = symbols('k, l, m, n', integer=True)
    k1, k2, k3, l1, l2, l3, m1, m2, m3 = symbols('k1, k2, k3, l1, l2, l3, m1, m2, m3', integer=True)
P, Q, R, p, q, r = symbols('P, Q, R, p, q, r', cls=Function)
P1, P2, P3, Q1, Q2, R1, R2 = symbols('P1, P2, P3, Q1, Q2, R1, R2', cls=Function)
x, y, z = symbols('x, y, z', cls=Function)
t = symbols('t')
x1 = diff(x(t),t) ; y1 = diff(y(t),t) ; z1 = diff(z(t),t)
x2 = diff(x(t),t,t) ; y2 = diff(y(t),t,t) ; z2 = diff(z(t),t,t)
eq1 = (Eq(diff(x(t),t), 5*t*x(t) + 2*y(t)), Eq(diff(y(t),t), 2*x(t) + 5*t*y(t)))
sol1 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): -5*t, (1, x(t), 1): 0, (0, x(t), 1): 1, \
(1, y(t), 0): -5*t, (1, x(t), 0): -2, (0, y(t), 1): 0, (0, y(t), 0): -2, (1, y(t), 1): 1}, \
'type_of_equation': 'type3', 'func': [x(t), y(t)], 'is_linear': True, 'eq': [-5*t*x(t) - 2*y(t) + \
Derivative(x(t), t), -5*t*y(t) - 2*x(t) + Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq1) == sol1
eq2 = (Eq(x2, k*x(t) - l*y1), Eq(y2, l*x1 + k*y(t)))
sol2 = {'order': {y(t): 2, x(t): 2}, 'type_of_equation': 'type3', 'is_linear': True, 'eq': \
[-k*x(t) + l*Derivative(y(t), t) + Derivative(x(t), t, t), -k*y(t) - l*Derivative(x(t), t) + \
Derivative(y(t), t, t)], 'no_of_equation': 2, 'func_coeff': {(0, y(t), 0): 0, (0, x(t), 2): 1, \
(1, y(t), 1): 0, (1, y(t), 2): 1, (1, x(t), 2): 0, (0, y(t), 2): 0, (0, x(t), 0): -k, (1, x(t), 1): \
-l, (0, x(t), 1): 0, (0, y(t), 1): l, (1, x(t), 0): 0, (1, y(t), 0): -k}, 'func': [x(t), y(t)]}
assert classify_sysode(eq2) == sol2
eq3 = (Eq(x2+4*x1+3*y1+9*x(t)+7*y(t), 11*exp(I*t)), Eq(y2+5*x1+8*y1+3*x(t)+12*y(t), 2*exp(I*t)))
sol3 = {'no_of_equation': 2, 'func_coeff': {(1, x(t), 2): 0, (0, y(t), 2): 0, (0, x(t), 0): 9, \
(1, x(t), 1): 5, (0, x(t), 1): 4, (0, y(t), 1): 3, (1, x(t), 0): 3, (1, y(t), 0): 12, (0, y(t), 0): 7, \
(0, x(t), 2): 1, (1, y(t), 2): 1, (1, y(t), 1): 8}, 'type_of_equation': 'type4', 'func': [x(t), y(t)], \
'is_linear': True, 'eq': [9*x(t) + 7*y(t) - 11*exp(I*t) + 4*Derivative(x(t), t) + 3*Derivative(y(t), t) + \
Derivative(x(t), t, t), 3*x(t) + 12*y(t) - 2*exp(I*t) + 5*Derivative(x(t), t) + 8*Derivative(y(t), t) + \
Derivative(y(t), t, t)], 'order': {y(t): 2, x(t): 2}}
assert classify_sysode(eq3) == sol3
eq4 = (Eq((4*t**2 + 7*t + 1)**2*x2, 5*x(t) + 35*y(t)), Eq((4*t**2 + 7*t + 1)**2*y2, x(t) + 9*y(t)))
sol4 = {'no_of_equation': 2, 'func_coeff': {(1, x(t), 2): 0, (0, y(t), 2): 0, (0, x(t), 0): -5, \
(1, x(t), 1): 0, (0, x(t), 1): 0, (0, y(t), 1): 0, (1, x(t), 0): -1, (1, y(t), 0): -9, (0, y(t), 0): -35, \
(0, x(t), 2): 16*t**4 + 56*t**3 + 57*t**2 + 14*t + 1, (1, y(t), 2): 16*t**4 + 56*t**3 + 57*t**2 + 14*t + 1, \
(1, y(t), 1): 0}, 'type_of_equation': 'type10', 'func': [x(t), y(t)], 'is_linear': True, \
'eq': [(4*t**2 + 7*t + 1)**2*Derivative(x(t), t, t) - 5*x(t) - 35*y(t), (4*t**2 + 7*t + 1)**2*Derivative(y(t), t, t)\
- x(t) - 9*y(t)], 'order': {y(t): 2, x(t): 2}}
assert classify_sysode(eq4) == sol4
eq5 = (Eq(diff(x(t),t), x(t) + y(t) + 9), Eq(diff(y(t),t), 2*x(t) + 5*y(t) + 23))
sol5 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): -1, (1, x(t), 1): 0, (0, x(t), 1): 1, (1, y(t), 0): -5, \
(1, x(t), 0): -2, (0, y(t), 1): 0, (0, y(t), 0): -1, (1, y(t), 1): 1}, 'type_of_equation': 'type2', \
'func': [x(t), y(t)], 'is_linear': True, 'eq': [-x(t) - y(t) + Derivative(x(t), t) - 9, -2*x(t) - 5*y(t) + \
Derivative(y(t), t) - 23], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq5) == sol5
eq6 = (Eq(x1, exp(k*x(t))*P(x(t),y(t))), Eq(y1,r(y(t))*P(x(t),y(t))))
sol6 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): 0, (1, x(t), 1): 0, (0, x(t), 1): 1, (1, y(t), 0): 0, \
(1, x(t), 0): 0, (0, y(t), 1): 0, (0, y(t), 0): 0, (1, y(t), 1): 1}, 'type_of_equation': 'type2', 'func': \
[x(t), y(t)], 'is_linear': False, 'eq': [-P(x(t), y(t))*exp(k*x(t)) + Derivative(x(t), t), -P(x(t), \
y(t))*r(y(t)) + Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq6) == sol6
eq7 = (Eq(x1, x(t)**2+y(t)/x(t)), Eq(y1, x(t)/y(t)))
sol7 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): 0, (1, x(t), 1): 0, (0, x(t), 1): 1, (1, y(t), 0): 0, \
(1, x(t), 0): -1/y(t), (0, y(t), 1): 0, (0, y(t), 0): -1/x(t), (1, y(t), 1): 1}, 'type_of_equation': 'type3', \
'func': [x(t), y(t)], 'is_linear': False, 'eq': [-x(t)**2 + Derivative(x(t), t) - y(t)/x(t), -x(t)/y(t) + \
Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq7) == sol7
eq8 = (Eq(x1, P1(x(t))*Q1(y(t))*R(x(t),y(t),t)), Eq(y1, P1(x(t))*Q1(y(t))*R(x(t),y(t),t)))
sol8 = {'func': [x(t), y(t)], 'is_linear': False, 'type_of_equation': 'type4', 'eq': \
[-P1(x(t))*Q1(y(t))*R(x(t), y(t), t) + Derivative(x(t), t), -P1(x(t))*Q1(y(t))*R(x(t), y(t), t) + \
Derivative(y(t), t)], 'func_coeff': {(0, y(t), 1): 0, (1, y(t), 1): 1, (1, x(t), 1): 0, (0, y(t), 0): 0, \
(1, x(t), 0): 0, (0, x(t), 0): 0, (1, y(t), 0): 0, (0, x(t), 1): 1}, 'order': {y(t): 1, x(t): 1}, 'no_of_equation': 2}
assert classify_sysode(eq8) == sol8
eq9 = (Eq(x1,3*y(t)-11*z(t)),Eq(y1,7*z(t)-3*x(t)),Eq(z1,11*x(t)-7*y(t)))
sol9 = {'no_of_equation': 3, 'func_coeff': {(1, y(t), 0): 0, (2, y(t), 1): 0, (2, z(t), 1): 1, \
(0, x(t), 0): 0, (2, x(t), 1): 0, (1, x(t), 1): 0, (2, y(t), 0): 7, (0, x(t), 1): 1, (1, z(t), 1): 0, \
(0, y(t), 1): 0, (1, x(t), 0): 3, (0, z(t), 0): 11, (0, y(t), 0): -3, (1, z(t), 0): -7, (0, z(t), 1): 0, \
(2, x(t), 0): -11, (2, z(t), 0): 0, (1, y(t), 1): 1}, 'type_of_equation': 'type2', 'func': [x(t), y(t), z(t)], \
'is_linear': True, 'eq': [-3*y(t) + 11*z(t) + Derivative(x(t), t), 3*x(t) - 7*z(t) + Derivative(y(t), t), \
-11*x(t) + 7*y(t) + Derivative(z(t), t)], 'order': {z(t): 1, y(t): 1, x(t): 1}}
assert classify_sysode(eq9) == sol9
eq10 = (x2 + log(t)*(t*x1 - x(t)) + exp(t)*(t*y1 - y(t)), y2 + (t**2)*(t*x1 - x(t)) + (t)*(t*y1 - y(t)))
sol10 = {'no_of_equation': 2, 'func_coeff': {(1, x(t), 2): 0, (0, y(t), 2): 0, (0, x(t), 0): -log(t), \
(1, x(t), 1): t**3, (0, x(t), 1): t*log(t), (0, y(t), 1): t*exp(t), (1, x(t), 0): -t**2, (1, y(t), 0): -t, \
(0, y(t), 0): -exp(t), (0, x(t), 2): 1, (1, y(t), 2): 1, (1, y(t), 1): t**2}, 'type_of_equation': 'type11', \
'func': [x(t), y(t)], 'is_linear': True, 'eq': [(t*Derivative(x(t), t) - x(t))*log(t) + (t*Derivative(y(t), t) - \
y(t))*exp(t) + Derivative(x(t), t, t), t**2*(t*Derivative(x(t), t) - x(t)) + t*(t*Derivative(y(t), t) - y(t)) \
+ Derivative(y(t), t, t)], 'order': {y(t): 2, x(t): 2}}
assert classify_sysode(eq10) == sol10
eq11 = (Eq(x1,x(t)*y(t)**3), Eq(y1,y(t)**5))
sol11 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): -y(t)**3, (1, x(t), 1): 0, (0, x(t), 1): 1, \
(1, y(t), 0): 0, (1, x(t), 0): 0, (0, y(t), 1): 0, (0, y(t), 0): 0, (1, y(t), 1): 1}, 'type_of_equation': \
'type1', 'func': [x(t), y(t)], 'is_linear': False, 'eq': [-x(t)*y(t)**3 + Derivative(x(t), t), \
-y(t)**5 + Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq11) == sol11
eq12 = (Eq(x1, y(t)), Eq(y1, x(t)))
sol12 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): 0, (1, x(t), 1): 0, (0, x(t), 1): 1, (1, y(t), 0): 0, \
(1, x(t), 0): -1, (0, y(t), 1): 0, (0, y(t), 0): -1, (1, y(t), 1): 1}, 'type_of_equation': 'type1', 'func': \
[x(t), y(t)], 'is_linear': True, 'eq': [-y(t) + Derivative(x(t), t), -x(t) + Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq12) == sol12
eq13 = (Eq(x1,x(t)*y(t)*sin(t)**2), Eq(y1,y(t)**2*sin(t)**2))
sol13 = {'no_of_equation': 2, 'func_coeff': {(0, x(t), 0): -y(t)*sin(t)**2, (1, x(t), 1): 0, (0, x(t), 1): 1, \
(1, y(t), 0): 0, (1, x(t), 0): 0, (0, y(t), 1): 0, (0, y(t), 0): -x(t)*sin(t)**2, (1, y(t), 1): 1}, \
'type_of_equation': 'type4', 'func': [x(t), y(t)], 'is_linear': False, 'eq': [-x(t)*y(t)*sin(t)**2 + \
Derivative(x(t), t), -y(t)**2*sin(t)**2 + Derivative(y(t), t)], 'order': {y(t): 1, x(t): 1}}
assert classify_sysode(eq13) == sol13
eq14 = (Eq(x1, 21*x(t)), Eq(y1, 17*x(t)+3*y(t)), Eq(z1, 5*x(t)+7*y(t)+9*z(t)))
sol14 = {'no_of_equation': 3, 'func_coeff': {(1, y(t), 0): -3, (2, y(t), 1): 0, (2, z(t), 1): 1, \
(0, x(t), 0): -21, (2, x(t), 1): 0, (1, x(t), 1): 0, (2, y(t), 0): -7, (0, x(t), 1): 1, (1, z(t), 1): 0, \
(0, y(t), 1): 0, (1, x(t), 0): -17, (0, z(t), 0): 0, (0, y(t), 0): 0, (1, z(t), 0): 0, (0, z(t), 1): 0, \
(2, x(t), 0): -5, (2, z(t), 0): -9, (1, y(t), 1): 1}, 'type_of_equation': 'type1', 'func': [x(t), y(t), z(t)], \
'is_linear': True, 'eq': [-21*x(t) + Derivative(x(t), t), -17*x(t) - 3*y(t) + Derivative(y(t), t), -5*x(t) - \
7*y(t) - 9*z(t) + Derivative(z(t), t)], 'order': {z(t): 1, y(t): 1, x(t): 1}}
assert classify_sysode(eq14) == sol14
eq15 = (Eq(x1,4*x(t)+5*y(t)+2*z(t)),Eq(y1,x(t)+13*y(t)+9*z(t)),Eq(z1,32*x(t)+41*y(t)+11*z(t)))
sol15 = {'no_of_equation': 3, 'func_coeff': {(1, y(t), 0): -13, (2, y(t), 1): 0, (2, z(t), 1): 1, \
(0, x(t), 0): -4, (2, x(t), 1): 0, (1, x(t), 1): 0, (2, y(t), 0): -41, (0, x(t), 1): 1, (1, z(t), 1): 0, \
(0, y(t), 1): 0, (1, x(t), 0): -1, (0, z(t), 0): -2, (0, y(t), 0): -5, (1, z(t), 0): -9, (0, z(t), 1): 0, \
(2, x(t), 0): -32, (2, z(t), 0): -11, (1, y(t), 1): 1}, 'type_of_equation': 'type6', 'func': \
[x(t), y(t), z(t)], 'is_linear': True, 'eq': [-4*x(t) - 5*y(t) - 2*z(t) + Derivative(x(t), t), -x(t) - 13*y(t) - \
9*z(t) + Derivative(y(t), t), -32*x(t) - 41*y(t) - 11*z(t) + Derivative(z(t), t)], 'order': {z(t): 1, y(t): 1, x(t): 1}}
assert classify_sysode(eq15) == sol15
eq16 = (Eq(3*x1,4*5*(y(t)-z(t))),Eq(4*y1,3*5*(z(t)-x(t))),Eq(5*z1,3*4*(x(t)-y(t))))
sol16 = {'no_of_equation': 3, 'func_coeff': {(1, y(t), 0): 0, (2, y(t), 1): 0, (2, z(t), 1): 5, \
(0, x(t), 0): 0, (2, x(t), 1): 0, (1, x(t), 1): 0, (2, y(t), 0): 12, (0, x(t), 1): 3, (1, z(t), 1): 0, \
(0, y(t), 1): 0, (1, x(t), 0): 15, (0, z(t), 0): 20, (0, y(t), 0): -20, (1, z(t), 0): -15, (0, z(t), 1): 0, \
(2, x(t), 0): -12, (2, z(t), 0): 0, (1, y(t), 1): 4}, 'type_of_equation': 'type3', 'func': [x(t), y(t), z(t)], \
'is_linear': True, 'eq': [-20*y(t) + 20*z(t) + 3*Derivative(x(t), t), 15*x(t) - 15*z(t) + 4*Derivative(y(t), t), \
-12*x(t) + 12*y(t) + 5*Derivative(z(t), t)], 'order': {z(t): 1, y(t): 1, x(t): 1}}
assert classify_sysode(eq16) == sol16
# issue 8193: funcs parameter for classify_sysode has to actually work
assert classify_sysode(eq1, funcs=[x(t), y(t)]) == sol1


def test_solve_ics():
# Basic tests that things work from dsolve.
assert dsolve(f(x).diff(x) - f(x), f(x), ics={f(0): 1}) == Eq(f(x), exp(x))
assert dsolve(f(x).diff(x) - f(x), f(x), ics={f(x).diff(x).subs(x, 0): 1}) == Eq(f(x), exp(x))
assert dsolve(f(x).diff(x, x) + f(x), f(x), ics={f(0): 1,
f(x).diff(x).subs(x, 0): 1}) == Eq(f(x), sin(x) + cos(x))
assert dsolve([f(x).diff(x) - f(x) + g(x), g(x).diff(x) - g(x) - f(x)],
[f(x), g(x)], ics={f(0): 1, g(0): 0}) == [Eq(f(x), exp(x)*cos(x)),
Eq(g(x), exp(x)*sin(x))]
# Test cases where dsolve returns two solutions.
eq = (x**2*f(x)**2 - x).diff(x)
assert dsolve(eq, f(x), ics={f(1): 0}) == [Eq(f(x),
-sqrt(x - 1)/x), Eq(f(x), sqrt(x - 1)/x)]
assert dsolve(eq, f(x), ics={f(x).diff(x).subs(x, 1): 0}) == [Eq(f(x),
-sqrt(x - S(1)/2)/x), Eq(f(x), sqrt(x - S(1)/2)/x)]
eq = cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x)
assert dsolve(eq, f(x),
ics={f(0):1}, hint='1st_exact', simplify=False) == Eq(x*cos(f(x)) + f(x)**3/3, S(1)/3)
assert dsolve(eq, f(x),
ics={f(0):1}, hint='1st_exact', simplify=True) == Eq(x*cos(f(x)) + f(x)**3/3, S(1)/3)
assert solve_ics([Eq(f(x), C1*exp(x))], [f(x)], [C1], {f(0): 1}) == {C1: 1}
assert solve_ics([Eq(f(x), C1*sin(x) + C2*cos(x))], [f(x)], [C1, C2],
{f(0): 1, f(pi/2): 1}) == {C1: 1, C2: 1}
assert solve_ics([Eq(f(x), C1*sin(x) + C2*cos(x))], [f(x)], [C1, C2],
{f(0): 1, f(x).diff(x).subs(x, 0): 1}) == {C1: 1, C2: 1}
# XXX: Ought to be ValueError
raises(NotImplementedError, lambda: solve_ics([Eq(f(x), C1*sin(x) + C2*cos(x))], [f(x)], [C1, C2], {f(0): 1, f(pi): 1}))
    # Underdetermined: not enough initial conditions to fix both constants.
raises(ValueError, lambda: solve_ics([Eq(f(x), C1*sin(x) + C2*cos(x))], [f(x)], [C1, C2], {f(0): 1}))
# Degenerate case. f'(0) is identically 0.
raises(ValueError, lambda: solve_ics([Eq(f(x), sqrt(C1 - x**2))], [f(x)], [C1], {f(x).diff(x).subs(x, 0): 0}))
EI, q, L = symbols('EI q L')
# eq = Eq(EI*diff(f(x), x, 4), q)
sols = [Eq(f(x), C1 + C2*x + C3*x**2 + C4*x**3 + q*x**4/(24*EI))]
funcs = [f(x)]
constants = [C1, C2, C3, C4]
# Test both cases, Derivative (the default from f(x).diff(x).subs(x, L)),
# and Subs
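    # These are cantilever-beam boundary conditions: clamped at x = 0
    # (f(0) = f'(0) = 0) and free at x = L (f''(L) = f'''(L) = 0).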
ics1 = {f(0): 0,
f(x).diff(x).subs(x, 0): 0,
f(L).diff(L, 2): 0,
f(L).diff(L, 3): 0}
ics2 = {f(0): 0,
f(x).diff(x).subs(x, 0): 0,
Subs(f(x).diff(x, 2), x, L): 0,
Subs(f(x).diff(x, 3), x, L): 0}
solved_constants1 = solve_ics(sols, funcs, constants, ics1)
solved_constants2 = solve_ics(sols, funcs, constants, ics2)
assert solved_constants1 == solved_constants2 == {
C1: 0,
C2: 0,
C3: L**2*q/(4*EI),
C4: -L*q/(6*EI)}


def test_ode_order():
f = Function('f')
g = Function('g')
x = Symbol('x')
assert ode_order(3*x*exp(f(x)), f(x)) == 0
assert ode_order(x*diff(f(x), x) + 3*x*f(x) - sin(x)/x, f(x)) == 1
assert ode_order(x**2*f(x).diff(x, x) + x*diff(f(x), x) - f(x), f(x)) == 2
assert ode_order(diff(x*exp(f(x)), x, x), f(x)) == 2
assert ode_order(diff(x*diff(x*exp(f(x)), x, x), x), f(x)) == 3
assert ode_order(diff(f(x), x, x), g(x)) == 0
assert ode_order(diff(f(x), x, x)*diff(g(x), x), f(x)) == 2
assert ode_order(diff(f(x), x, x)*diff(g(x), x), g(x)) == 1
assert ode_order(diff(x*diff(x*exp(f(x)), x, x), x), g(x)) == 0
    # issue 5835: ode_order has to also work for unevaluated derivatives
    # (i.e., without using doit()).
assert ode_order(Derivative(x*f(x), x), f(x)) == 1
assert ode_order(x*sin(Derivative(x*f(x)**2, x, x)), f(x)) == 2
assert ode_order(Derivative(x*Derivative(x*exp(f(x)), x, x), x), g(x)) == 0
assert ode_order(Derivative(f(x), x, x), g(x)) == 0
assert ode_order(Derivative(x*exp(f(x)), x, x), f(x)) == 2
assert ode_order(Derivative(f(x), x, x)*Derivative(g(x), x), g(x)) == 1
assert ode_order(Derivative(x*Derivative(f(x), x, x), x), f(x)) == 3
assert ode_order(
x*sin(Derivative(x*Derivative(f(x), x)**2, x, x)), f(x)) == 3


# In all tests below, checkodesol has the order option set to prevent
# superfluous calls to ode_order(), and the solve_for_func flag set to False
# because dsolve() already tries to solve for the function, unless the
# simplify=False option is set.
def test_old_ode_tests():
# These are simple tests from the old ode module
eq1 = Eq(f(x).diff(x), 0)
eq2 = Eq(3*f(x).diff(x) - 5, 0)
eq3 = Eq(3*f(x).diff(x), 5)
eq4 = Eq(9*f(x).diff(x, x) + f(x), 0)
eq5 = Eq(9*f(x).diff(x, x), f(x))
    # Type: a(x)*f'(x) + b(x)*f(x) + c(x) = 0
eq6 = Eq(x**2*f(x).diff(x) + 3*x*f(x) - sin(x)/x, 0)
    # Type: 2nd order, constant coefficients (two real different roots)
    eq7 = Eq(f(x).diff(x, x) - 3*diff(f(x), x) + 2*f(x), 0)
    # Type: 2nd order, constant coefficients (two real equal roots)
    eq8 = Eq(f(x).diff(x, x) - 4*diff(f(x), x) + 4*f(x), 0)
    # Type: 2nd order, constant coefficients (two complex roots)
    eq9 = Eq(f(x).diff(x, x) + 2*diff(f(x), x) + 3*f(x), 0)
eq10 = Eq(3*f(x).diff(x) - 1, 0)
eq11 = Eq(x*f(x).diff(x) - 1, 0)
sol1 = Eq(f(x), C1)
sol2 = Eq(f(x), C1 + 5*x/3)
sol3 = Eq(f(x), C1 + 5*x/3)
sol4 = Eq(f(x), C1*sin(x/3) + C2*cos(x/3))
sol5 = Eq(f(x), C1*exp(-x/3) + C2*exp(x/3))
sol6 = Eq(f(x), (C1 - cos(x))/x**3)
sol7 = Eq(f(x), (C1 + C2*exp(x))*exp(x))
sol8 = Eq(f(x), (C1 + C2*x)*exp(2*x))
sol9 = Eq(f(x), (C1*sin(x*sqrt(2)) + C2*cos(x*sqrt(2)))*exp(-x))
sol10 = Eq(f(x), C1 + x/3)
sol11 = Eq(f(x), C1 + log(x))
assert dsolve(eq1) == sol1
assert dsolve(eq1.lhs) == sol1
assert dsolve(eq2) == sol2
assert dsolve(eq3) == sol3
assert dsolve(eq4) == sol4
assert dsolve(eq5) == sol5
assert dsolve(eq6) == sol6
assert dsolve(eq7) == sol7
assert dsolve(eq8) == sol8
assert dsolve(eq9) == sol9
assert dsolve(eq10) == sol10
assert dsolve(eq11) == sol11
assert checkodesol(eq1, sol1, order=1, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=1, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=2, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=1, solve_for_func=False)[0]
assert checkodesol(eq7, sol7, order=2, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=2, solve_for_func=False)[0]
assert checkodesol(eq9, sol9, order=2, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=1, solve_for_func=False)[0]
assert checkodesol(eq11, sol11, order=1, solve_for_func=False)[0]


@slow
def test_1st_linear():
# Type: first order linear form f'(x)+p(x)f(x)=q(x)
eq = Eq(f(x).diff(x) + x*f(x), x**2)
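    # The integrating factor is exp(Integral(x, x)) = exp(x**2/2); the erfi
    # term comes from integrating x**2*exp(x**2/2).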
sol = Eq(f(x), (C1 + x*exp(x**2/2)
- sqrt(2)*sqrt(pi)*erfi(sqrt(2)*x/2)/2)*exp(-x**2/2))
assert dsolve(eq, hint='1st_linear') == sol
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]


def test_Bernoulli():
# Type: Bernoulli, f'(x) + p(x)*f(x) == q(x)*f(x)**n
eq = Eq(x*f(x).diff(x) + f(x) - f(x)**2, 0)
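    # Here n = 2, so v = 1/f(x) turns the equation into the linear ODE
    # x*v' - v = -1, with v = 1 + C1*x, i.e. f(x) = 1/(x*(C1 + 1/x)).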
sol = dsolve(eq, f(x), hint='Bernoulli')
assert sol == Eq(f(x), 1/(x*(C1 + 1/x)))
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]


def test_Riccati_special_minus2():
# Type: Riccati special alpha = -2, a*dy/dx + b*y**2 + c*y/x +d/x**2
eq = 2*f(x).diff(x) + f(x)**2 - f(x)/x + 3*x**(-2)
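    # Matches the pattern above with a = 2, b = 1, c = -1, d = 3.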
sol = dsolve(eq, f(x), hint='Riccati_special_minus2')
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]


def test_1st_exact1():
# Type: Exact differential equation, p(x,f) + q(x,f)*f' == 0,
# where dp/df == dq/dx
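    # For eq1, p = sin(x)*cos(f) and q = cos(x)*sin(f), and indeed
    # dp/df = dq/dx = -sin(x)*sin(f).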
eq1 = sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x)
eq2 = (2*x*f(x) + 1)/f(x) + (f(x) - x)/f(x)**2*f(x).diff(x)
eq3 = 2*x + f(x)*cos(x) + (2*f(x) + sin(x) - sin(f(x)))*f(x).diff(x)
eq4 = cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x)
eq5 = 2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x)
sol1 = [Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))]
sol2 = Eq(f(x), exp(C1 - x**2 + LambertW(-x*exp(-C1 + x**2))))
sol2b = Eq(log(f(x)) + x/f(x) + x**2, C1)
sol3 = Eq(f(x)*sin(x) + cos(f(x)) + x**2 + f(x)**2, C1)
sol4 = Eq(x*cos(f(x)) + f(x)**3/3, C1)
sol5 = Eq(x**2*f(x) + f(x)**3/3, C1)
assert dsolve(eq1, f(x), hint='1st_exact') == sol1
assert dsolve(eq2, f(x), hint='1st_exact') == sol2
assert dsolve(eq3, f(x), hint='1st_exact') == sol3
assert dsolve(eq4, hint='1st_exact') == sol4
assert dsolve(eq5, hint='1st_exact', simplify=False) == sol5
assert checkodesol(eq1, sol1, order=1, solve_for_func=False)[0]
# issue 5080 blocks the testing of this solution
#assert checkodesol(eq2, sol2, order=1, solve_for_func=False)[0]
assert checkodesol(eq2, sol2b, order=1, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=1, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=1, solve_for_func=False)[0]


@slow
@XFAIL
def test_1st_exact2():
"""
This is an exact equation that fails under the exact engine. It is caught
by first order homogeneous albeit with a much contorted solution. The
exact engine fails because of a poorly simplified integral of q(0,y)dy,
where q is the function multiplying f'. The solutions should be
Eq(sqrt(x**2+f(x)**2)**3+y**3, C1). The equation below is
equivalent, but it is so complex that checkodesol fails, and takes a long
time to do so.
"""
if ON_TRAVIS:
skip("Too slow for travis.")
eq = (x*sqrt(x**2 + f(x)**2) - (x**2*f(x)/(f(x) -
sqrt(x**2 + f(x)**2)))*f(x).diff(x))
sol = dsolve(eq)
assert sol == Eq(log(x),
C1 - 9*sqrt(1 + f(x)**2/x**2)*asinh(f(x)/x)/(-27*f(x)/x +
27*sqrt(1 + f(x)**2/x**2)) - 9*sqrt(1 + f(x)**2/x**2)*
log(1 - sqrt(1 + f(x)**2/x**2)*f(x)/x + 2*f(x)**2/x**2)/
(-27*f(x)/x + 27*sqrt(1 + f(x)**2/x**2)) +
9*asinh(f(x)/x)*f(x)/(x*(-27*f(x)/x + 27*sqrt(1 + f(x)**2/x**2))) +
9*f(x)*log(1 - sqrt(1 + f(x)**2/x**2)*f(x)/x + 2*f(x)**2/x**2)/
(x*(-27*f(x)/x + 27*sqrt(1 + f(x)**2/x**2))))
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]


def test_separable1():
# test_separable1-5 are from Ordinary Differential Equations, Tenenbaum and
# Pollard, pg. 55
eq1 = f(x).diff(x) - f(x)
eq2 = x*f(x).diff(x) - f(x)
eq3 = f(x).diff(x) + sin(x)
eq4 = f(x)**2 + 1 - (x**2 + 1)*f(x).diff(x)
eq5 = f(x).diff(x)/tan(x) - f(x) - 2
eq6 = f(x).diff(x) * (1 - sin(f(x))) - 1
sol1 = Eq(f(x), C1*exp(x))
sol2 = Eq(f(x), C1*x)
sol3 = Eq(f(x), C1 + cos(x))
sol4 = Eq(atan(f(x)), C1 + atan(x))
sol5 = Eq(f(x), C1/cos(x) - 2)
sol6 = Eq(-x + f(x) + cos(f(x)), C1)
assert dsolve(eq1, hint='separable') == sol1
assert dsolve(eq2, hint='separable') == sol2
assert dsolve(eq3, hint='separable') == sol3
assert dsolve(eq4, hint='separable', simplify=False) == sol4
assert dsolve(eq5, hint='separable') == sol5
assert dsolve(eq6, hint='separable') == sol6
assert checkodesol(eq1, sol1, order=1, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=1, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=1, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=1, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=1, solve_for_func=False)[0]


def test_separable2():
a = Symbol('a')
eq6 = f(x)*x**2*f(x).diff(x) - f(x)**3 - 2*x**2*f(x).diff(x)
eq7 = f(x)**2 - 1 - (2*f(x) + x*f(x))*f(x).diff(x)
eq8 = x*log(x)*f(x).diff(x) + sqrt(1 + f(x)**2)
eq9 = exp(x + 1)*tan(f(x)) + cos(f(x))*f(x).diff(x)
eq10 = (x*cos(f(x)) + x**2*sin(f(x))*f(x).diff(x) -
a**2*sin(f(x))*f(x).diff(x))
sol6 = Eq(Integral((u - 2)/u**3, (u, f(x))),
C1 + Integral(x**(-2), x))
sol7 = Eq(-log(-1 + f(x)**2)/2, C1 - log(2 + x))
sol8 = Eq(asinh(f(x)), C1 - log(log(x)))
# integrate cannot handle the integral on the lhs (cos/tan)
sol9 = Eq(Integral(cos(u)/tan(u), (u, f(x))),
C1 + Integral(-exp(1)*exp(x), x))
sol10 = Eq(-log(cos(f(x))), C1 - log(- a**2 + x**2)/2)
assert dsolve(eq6, hint='separable_Integral').dummy_eq(sol6)
assert dsolve(eq7, hint='separable', simplify=False) == sol7
assert dsolve(eq8, hint='separable', simplify=False) == sol8
assert dsolve(eq9, hint='separable_Integral').dummy_eq(sol9)
assert dsolve(eq10, hint='separable', simplify=False) == sol10
assert checkodesol(eq7, sol7, order=1, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=1, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=1, solve_for_func=False)[0]


def test_separable3():
eq11 = f(x).diff(x) - f(x)*tan(x)
eq12 = (x - 1)*cos(f(x))*f(x).diff(x) - 2*x*sin(f(x))
eq13 = f(x).diff(x) - f(x)*log(f(x))/tan(x)
sol11 = Eq(f(x), C1/cos(x))
sol12 = Eq(log(sin(f(x))), C1 + 2*x + 2*log(x - 1))
sol13 = Eq(log(log(f(x))), C1 + log(sin(x)))
assert dsolve(eq11, hint='separable') == sol11
assert dsolve(eq12, hint='separable', simplify=False) == sol12
assert dsolve(eq13, hint='separable', simplify=False) == sol13
assert checkodesol(eq11, sol11, order=1, solve_for_func=False)[0]
assert checkodesol(eq13, sol13, order=1, solve_for_func=False)[0]


def test_separable4():
# This has a slow integral (1/((1 + y**2)*atan(y))), so we isolate it.
eq14 = x*f(x).diff(x) + (1 + f(x)**2)*atan(f(x))
sol14 = Eq(log(atan(f(x))), C1 - log(x))
assert dsolve(eq14, hint='separable', simplify=False) == sol14
assert checkodesol(eq14, sol14, order=1, solve_for_func=False)[0]


def test_separable5():
eq15 = f(x).diff(x) + x*(f(x) + 1)
eq16 = exp(f(x)**2)*(x**2 + 2*x + 1) + (x*f(x) + f(x))*f(x).diff(x)
eq17 = f(x).diff(x) + f(x)
eq18 = sin(x)*cos(2*f(x)) + cos(x)*sin(2*f(x))*f(x).diff(x)
eq19 = (1 - x)*f(x).diff(x) - x*(f(x) + 1)
eq20 = f(x)*diff(f(x), x) + x - 3*x*f(x)**2
eq21 = f(x).diff(x) - exp(x + f(x))
sol15 = Eq(f(x), -1 + C1*exp(-x**2/2))
sol16 = Eq(-exp(-f(x)**2)/2, C1 - x - x**2/2)
sol17 = Eq(f(x), C1*exp(-x))
sol18 = Eq(-log(cos(2*f(x)))/2, C1 + log(cos(x)))
sol19 = Eq(f(x), (C1*exp(-x) - x + 1)/(x - 1))
sol20 = Eq(log(-1 + 3*f(x)**2)/6, C1 + x**2/2)
sol21 = Eq(-exp(-f(x)), C1 + exp(x))
assert dsolve(eq15, hint='separable') == sol15
assert dsolve(eq16, hint='separable', simplify=False) == sol16
assert dsolve(eq17, hint='separable') == sol17
assert dsolve(eq18, hint='separable', simplify=False) == sol18
assert dsolve(eq19, hint='separable') == sol19
assert dsolve(eq20, hint='separable', simplify=False) == sol20
assert dsolve(eq21, hint='separable', simplify=False) == sol21
assert checkodesol(eq15, sol15, order=1, solve_for_func=False)[0]
assert checkodesol(eq16, sol16, order=1, solve_for_func=False)[0]
assert checkodesol(eq17, sol17, order=1, solve_for_func=False)[0]
assert checkodesol(eq18, sol18, order=1, solve_for_func=False)[0]
assert checkodesol(eq19, sol19, order=1, solve_for_func=False)[0]
assert checkodesol(eq20, sol20, order=1, solve_for_func=False)[0]
assert checkodesol(eq21, sol21, order=1, solve_for_func=False)[0]
def test_separable_1_5_checkodesol():
eq12 = (x - 1)*cos(f(x))*f(x).diff(x) - 2*x*sin(f(x))
sol12 = Eq(-log(1 - cos(f(x))**2)/2, C1 - 2*x - 2*log(1 - x))
assert checkodesol(eq12, sol12, order=1, solve_for_func=False)[0]
def test_homogeneous_order():
assert homogeneous_order(exp(y/x) + tan(y/x), x, y) == 0
assert homogeneous_order(x**2 + sin(x)*cos(y), x, y) is None
assert homogeneous_order(x - y - x*sin(y/x), x, y) == 1
assert homogeneous_order((x*y + sqrt(x**4 + y**4) + x**2*(log(x) - log(y)))/
(pi*x**Rational(2, 3)*sqrt(y)**3), x, y) == Rational(-1, 6)
assert homogeneous_order(y/x*cos(y/x) - x/y*sin(y/x) + cos(y/x), x, y) == 0
assert homogeneous_order(f(x), x, f(x)) == 1
assert homogeneous_order(f(x)**2, x, f(x)) == 2
assert homogeneous_order(x*y*z, x, y) == 2
assert homogeneous_order(x*y*z, x, y, z) == 3
assert homogeneous_order(x**2*f(x)/sqrt(x**2 + f(x)**2), f(x)) is None
assert homogeneous_order(f(x, y)**2, x, f(x, y), y) == 2
assert homogeneous_order(f(x, y)**2, x, f(x), y) is None
assert homogeneous_order(f(x, y)**2, x, f(x, y)) is None
assert homogeneous_order(f(y, x)**2, x, y, f(x, y)) is None
assert homogeneous_order(f(y), f(x), x) is None
assert homogeneous_order(-f(x)/x + 1/sin(f(x)/ x), f(x), x) == 0
assert homogeneous_order(log(1/y) + log(x**2), x, y) is None
assert homogeneous_order(log(1/y) + log(x), x, y) == 0
assert homogeneous_order(log(x/y), x, y) == 0
assert homogeneous_order(2*log(1/y) + 2*log(x), x, y) == 0
a = Symbol('a')
assert homogeneous_order(a*log(1/y) + a*log(x), x, y) == 0
assert homogeneous_order(f(x).diff(x), x, y) is None
assert homogeneous_order(-f(x).diff(x) + x, x, y) is None
assert homogeneous_order(O(x), x, y) is None
assert homogeneous_order(x + O(x**2), x, y) is None
assert homogeneous_order(x**pi, x) == pi
assert homogeneous_order(x**x, x) is None
raises(ValueError, lambda: homogeneous_order(x*y))
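# Hedged sketch of what homogeneous_order computes: F is homogeneous of
# order n in x and y exactly when F(t*x, t*y) == t**n*F(x, y).
def _example_homogeneous_order_definition():
    from sympy import symbols, simplify
    from sympy.solvers.ode import homogeneous_order
    x_, y_, t_ = symbols('x y t', positive=True)
    F = x_**2 + x_*y_
    n = homogeneous_order(F, x_, y_)     # -> 2
    assert n == 2
    assert simplify(F.subs({x_: t_*x_, y_: t_*y_}) - t_**n*F) == 0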
@slow
def test_1st_homogeneous_coeff_ode():
# Type: First order homogeneous, y'=f(y/x)
eq1 = f(x)/x*cos(f(x)/x) - (x/f(x)*sin(f(x)/x) + cos(f(x)/x))*f(x).diff(x)
eq2 = x*f(x).diff(x) - f(x) - x*sin(f(x)/x)
eq3 = f(x) + (x*log(f(x)/x) - 2*x)*diff(f(x), x)
eq4 = 2*f(x)*exp(x/f(x)) + f(x)*f(x).diff(x) - 2*x*exp(x/f(x))*f(x).diff(x)
eq5 = 2*x**2*f(x) + f(x)**3 + (x*f(x)**2 - 2*x**3)*f(x).diff(x)
eq6 = x*exp(f(x)/x) - f(x)*sin(f(x)/x) + x*sin(f(x)/x)*f(x).diff(x)
eq7 = (x + sqrt(f(x)**2 - x*f(x)))*f(x).diff(x) - f(x)
eq8 = x + f(x) - (x - f(x))*f(x).diff(x)
sol1 = Eq(log(x), C1 - log(f(x)*sin(f(x)/x)/x))
sol2 = Eq(log(x), log(C1) + log(cos(f(x)/x) - 1)/2 - log(cos(f(x)/x) + 1)/2)
sol3 = Eq(f(x), -exp(C1)*LambertW(-x*exp(-C1 + 1)))
sol4 = Eq(log(f(x)), C1 - 2*exp(x/f(x)))
sol5 = Eq(f(x), exp(2*C1 + LambertW(-2*x**4*exp(-4*C1))/2)/x)
sol6 = Eq(log(x),
C1 + exp(-f(x)/x)*sin(f(x)/x)/2 + exp(-f(x)/x)*cos(f(x)/x)/2)
sol7 = Eq(log(f(x)), C1 - 2*sqrt(-x/f(x) + 1))
sol8 = Eq(log(x), C1 - log(sqrt(1 + f(x)**2/x**2)) + atan(f(x)/x))
assert dsolve(eq1, hint='1st_homogeneous_coeff_subs_dep_div_indep') == \
sol1
    # indep_div_dep actually has a simpler solution for eq2,
    # but it runs too slowly
assert dsolve(eq2, hint='1st_homogeneous_coeff_subs_dep_div_indep',
simplify=False) == sol2
assert dsolve(eq3, hint='1st_homogeneous_coeff_best') == sol3
assert dsolve(eq4, hint='1st_homogeneous_coeff_best') == sol4
assert dsolve(eq5, hint='1st_homogeneous_coeff_best') == sol5
assert dsolve(eq6, hint='1st_homogeneous_coeff_subs_dep_div_indep') == \
sol6
assert dsolve(eq7, hint='1st_homogeneous_coeff_best') == sol7
assert dsolve(eq8, hint='1st_homogeneous_coeff_best') == sol8
# checks are below
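# Hedged sketch of the idea behind the dep_div_indep hints: substituting
# y = x*u(x) turns y' = F(y/x) into the separable equation
# x*u' = F(u) - u, which is what these hints integrate.  F below is an
# arbitrary illustrative choice.
def _example_dep_div_indep_substitution():
    from sympy import Function, Symbol
    x_ = Symbol('x')
    u_ = Function('u')
    F = lambda v: v**2 + v               # any F(y/x) works the same way
    y_ = x_*u_(x_)                       # the substitution y = x*u
    residual = y_.diff(x_) - F(y_/x_)    # y' - F(y/x) after substituting
    assert (residual - (x_*u_(x_).diff(x_) + u_(x_) - F(u_(x_)))).expand() == 0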
@slow
def test_1st_homogeneous_coeff_ode_check134568():
    # These are the checkodesol checks from test_1st_homogeneous_coeff_ode().
eq1 = f(x)/x*cos(f(x)/x) - (x/f(x)*sin(f(x)/x) + cos(f(x)/x))*f(x).diff(x)
eq3 = f(x) + (x*log(f(x)/x) - 2*x)*diff(f(x), x)
eq4 = 2*f(x)*exp(x/f(x)) + f(x)*f(x).diff(x) - 2*x*exp(x/f(x))*f(x).diff(x)
eq5 = 2*x**2*f(x) + f(x)**3 + (x*f(x)**2 - 2*x**3)*f(x).diff(x)
eq6 = x*exp(f(x)/x) - f(x)*sin(f(x)/x) + x*sin(f(x)/x)*f(x).diff(x)
eq8 = x + f(x) - (x - f(x))*f(x).diff(x)
sol1 = Eq(f(x)*sin(f(x)/x), C1)
    sol3 = Eq(-f(x)/(1 + log(x/f(x))), C1)
    sol4 = Eq(log(C1*f(x)) + 2*exp(x/f(x)), 0)
sol5 = Eq(log(C1*x*sqrt(1/x)*sqrt(f(x))) + x**2/(2*f(x)**2), 0)
sol6 = Eq(-exp(-f(x)/x)*sin(f(x)/x)/2 + log(C1*x) -
cos(f(x)/x)*exp(-f(x)/x)/2, 0)
sol8 = Eq(-atan(f(x)/x) + log(C1*x*sqrt(1 + f(x)**2/x**2)), 0)
assert checkodesol(eq1, sol1, order=1, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=1, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=1, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=1, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=1, solve_for_func=False)[0]
def test_1st_homogeneous_coeff_ode_check2():
eq2 = x*f(x).diff(x) - f(x) - x*sin(f(x)/x)
sol2 = Eq(x/tan(f(x)/(2*x)), C1)
assert checkodesol(eq2, sol2, order=1, solve_for_func=False)[0]
@XFAIL
def test_1st_homogeneous_coeff_ode_check3():
skip('This is a known issue.')
# checker cannot determine that the following expression is zero:
# (False,
# x*(log(exp(-LambertW(C1*x))) +
# LambertW(C1*x))*exp(-LambertW(C1*x) + 1))
# This is blocked by issue 5080.
eq3 = f(x) + (x*log(f(x)/x) - 2*x)*diff(f(x), x)
sol3a = Eq(f(x), x*exp(1 - LambertW(C1*x)))
assert checkodesol(eq3, sol3a, solve_for_func=True)[0]
# Checker can't verify this form either
# (False,
# C1*(log(C1*LambertW(C2*x)/x) + LambertW(C2*x) - 1)*LambertW(C2*x))
# It is because a = W(a)*exp(W(a)), so log(a) == log(W(a)) + W(a) and C2 =
# -E/C1 (which can be verified by solving with simplify=False).
sol3b = Eq(f(x), C1*LambertW(C2*x))
assert checkodesol(eq3, sol3b, solve_for_func=True)[0]
def test_1st_homogeneous_coeff_ode_check7():
eq7 = (x + sqrt(f(x)**2 - x*f(x)))*f(x).diff(x) - f(x)
sol7 = Eq(log(C1*f(x)) + 2*sqrt(1 - x/f(x)), 0)
assert checkodesol(eq7, sol7, order=1, solve_for_func=False)[0]
def test_1st_homogeneous_coeff_ode2():
eq1 = f(x).diff(x) - f(x)/x + 1/sin(f(x)/x)
eq2 = x**2 + f(x)**2 - 2*x*f(x)*f(x).diff(x)
eq3 = x*exp(f(x)/x) + f(x) - x*f(x).diff(x)
sol1 = [Eq(f(x), x*(-acos(C1 + log(x)) + 2*pi)), Eq(f(x), x*acos(C1 + log(x)))]
sol2 = Eq(log(f(x)), log(C1) + log(x/f(x)) - log(x**2/f(x)**2 - 1))
sol3 = Eq(f(x), log((1/(C1 - log(x)))**x))
# specific hints are applied for speed reasons
assert dsolve(eq1, hint='1st_homogeneous_coeff_subs_dep_div_indep') == sol1
assert dsolve(eq2, hint='1st_homogeneous_coeff_best', simplify=False) == sol2
assert dsolve(eq3, hint='1st_homogeneous_coeff_subs_dep_div_indep') == sol3
assert checkodesol(eq1, sol1, order=1, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=1, solve_for_func=False)[0]
# test for eq3 is in test_1st_homogeneous_coeff_ode2_check3 below
def test_1st_homogeneous_coeff_ode2_check3():
eq3 = x*exp(f(x)/x) + f(x) - x*f(x).diff(x)
sol3 = Eq(f(x), log(log(C1/x)**(-x)))
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
def test_1st_homogeneous_coeff_ode_check9():
_u2 = Dummy('u2')
__a = Dummy('a')
eq9 = f(x)**2 + (x*sqrt(f(x)**2 - x**2) - x*f(x))*f(x).diff(x)
sol9 = Eq(-Integral(-1/(-(1 - sqrt(1 - _u2**2))*_u2 + _u2), (_u2, __a,
x/f(x))) + log(C1*f(x)), 0)
assert checkodesol(eq9, sol9, order=1, solve_for_func=False)[0]
def test_1st_homogeneous_coeff_ode3():
    # The standard integration engine cannot handle one of the integrals
    # involved (see issue 4551).  The meijerg code comes up with an answer,
    # but in an unconventional form.
    # checkodesol fails for this equation, so its check is in
    # test_1st_homogeneous_coeff_ode_check9 above.  That check spells out the
    # expected solution with explicit Dummy symbols because u2 is a dummy
    # variable.
eq = f(x)**2 + (x*sqrt(f(x)**2 - x**2) - x*f(x))*f(x).diff(x)
sol = Eq(log(f(x)), C1 + Piecewise(
(acosh(f(x)/x), abs(f(x)**2)/x**2 > 1),
(-I*asin(f(x)/x), True)))
assert dsolve(eq, hint='1st_homogeneous_coeff_subs_indep_div_dep') == sol
def test_1st_homogeneous_coeff_corner_case():
eq1 = f(x).diff(x) - f(x)/x
c1 = classify_ode(eq1, f(x))
eq2 = x*f(x).diff(x) - f(x)
c2 = classify_ode(eq2, f(x))
sdi = "1st_homogeneous_coeff_subs_dep_div_indep"
sid = "1st_homogeneous_coeff_subs_indep_div_dep"
assert sid not in c1 and sdi not in c1
assert sid not in c2 and sdi not in c2
@slow
def test_nth_linear_constant_coeff_homogeneous():
# From Exercise 20, in Ordinary Differential Equations,
# Tenenbaum and Pollard, pg. 220
a = Symbol('a', positive=True)
k = Symbol('k', real=True)
eq1 = f(x).diff(x, 2) + 2*f(x).diff(x)
eq2 = f(x).diff(x, 2) - 3*f(x).diff(x) + 2*f(x)
eq3 = f(x).diff(x, 2) - f(x)
eq4 = f(x).diff(x, 3) + f(x).diff(x, 2) - 6*f(x).diff(x)
eq5 = 6*f(x).diff(x, 2) - 11*f(x).diff(x) + 4*f(x)
eq6 = Eq(f(x).diff(x, 2) + 2*f(x).diff(x) - f(x), 0)
eq7 = diff(f(x), x, 3) + diff(f(x), x, 2) - 10*diff(f(x), x) - 6*f(x)
eq8 = f(x).diff(x, 4) - f(x).diff(x, 3) - 4*f(x).diff(x, 2) + \
4*f(x).diff(x)
eq9 = f(x).diff(x, 4) + 4*f(x).diff(x, 3) + f(x).diff(x, 2) - \
4*f(x).diff(x) - 2*f(x)
eq10 = f(x).diff(x, 4) - a**2*f(x)
eq11 = f(x).diff(x, 2) - 2*k*f(x).diff(x) - 2*f(x)
eq12 = f(x).diff(x, 2) + 4*k*f(x).diff(x) - 12*k**2*f(x)
eq13 = f(x).diff(x, 4)
eq14 = f(x).diff(x, 2) + 4*f(x).diff(x) + 4*f(x)
eq15 = 3*f(x).diff(x, 3) + 5*f(x).diff(x, 2) + f(x).diff(x) - f(x)
eq16 = f(x).diff(x, 3) - 6*f(x).diff(x, 2) + 12*f(x).diff(x) - 8*f(x)
eq17 = f(x).diff(x, 2) - 2*a*f(x).diff(x) + a**2*f(x)
eq18 = f(x).diff(x, 4) + 3*f(x).diff(x, 3)
eq19 = f(x).diff(x, 4) - 2*f(x).diff(x, 2)
eq20 = f(x).diff(x, 4) + 2*f(x).diff(x, 3) - 11*f(x).diff(x, 2) - \
12*f(x).diff(x) + 36*f(x)
eq21 = 36*f(x).diff(x, 4) - 37*f(x).diff(x, 2) + 4*f(x).diff(x) + 5*f(x)
eq22 = f(x).diff(x, 4) - 8*f(x).diff(x, 2) + 16*f(x)
eq23 = f(x).diff(x, 2) - 2*f(x).diff(x) + 5*f(x)
eq24 = f(x).diff(x, 2) - f(x).diff(x) + f(x)
eq25 = f(x).diff(x, 4) + 5*f(x).diff(x, 2) + 6*f(x)
eq26 = f(x).diff(x, 2) - 4*f(x).diff(x) + 20*f(x)
eq27 = f(x).diff(x, 4) + 4*f(x).diff(x, 2) + 4*f(x)
eq28 = f(x).diff(x, 3) + 8*f(x)
eq29 = f(x).diff(x, 4) + 4*f(x).diff(x, 2)
eq30 = f(x).diff(x, 5) + 2*f(x).diff(x, 3) + f(x).diff(x)
eq31 = f(x).diff(x, 4) + f(x).diff(x, 2) + f(x)
eq32 = f(x).diff(x, 4) + 4*f(x).diff(x, 2) + f(x)
sol1 = Eq(f(x), C1 + C2*exp(-2*x))
sol2 = Eq(f(x), (C1 + C2*exp(x))*exp(x))
sol3 = Eq(f(x), C1*exp(x) + C2*exp(-x))
sol4 = Eq(f(x), C1 + C2*exp(-3*x) + C3*exp(2*x))
sol5 = Eq(f(x), C1*exp(x/2) + C2*exp(4*x/3))
sol6 = Eq(f(x), C1*exp(x*(-1 + sqrt(2))) + C2*exp(x*(-sqrt(2) - 1)))
sol7 = Eq(f(x),
C1*exp(3*x) + C2*exp(x*(-2 - sqrt(2))) + C3*exp(x*(-2 + sqrt(2))))
sol8 = Eq(f(x), C1 + C2*exp(x) + C3*exp(-2*x) + C4*exp(2*x))
sol9 = Eq(f(x),
C1*exp(x) + C2*exp(-x) + C3*exp(x*(-2 + sqrt(2))) +
C4*exp(x*(-2 - sqrt(2))))
sol10 = Eq(f(x),
C1*sin(x*sqrt(a)) + C2*cos(x*sqrt(a)) + C3*exp(x*sqrt(a)) +
C4*exp(-x*sqrt(a)))
sol11 = Eq(f(x),
C1*exp(x*(k - sqrt(k**2 + 2))) + C2*exp(x*(k + sqrt(k**2 + 2))))
sol12 = Eq(f(x), C1*exp(-6*k*x) + C2*exp(2*k*x))
sol13 = Eq(f(x), C1 + C2*x + C3*x**2 + C4*x**3)
sol14 = Eq(f(x), (C1 + C2*x)*exp(-2*x))
sol15 = Eq(f(x), (C1 + C2*x)*exp(-x) + C3*exp(x/3))
sol16 = Eq(f(x), (C1 + C2*x + C3*x**2)*exp(2*x))
sol17 = Eq(f(x), (C1 + C2*x)*exp(a*x))
sol18 = Eq(f(x), C1 + C2*x + C3*x**2 + C4*exp(-3*x))
sol19 = Eq(f(x), C1 + C2*x + C3*exp(x*sqrt(2)) + C4*exp(-x*sqrt(2)))
sol20 = Eq(f(x), (C1 + C2*x)*exp(-3*x) + (C3 + C4*x)*exp(2*x))
sol21 = Eq(f(x), C1*exp(x/2) + C2*exp(-x) + C3*exp(-x/3) + C4*exp(5*x/6))
sol22 = Eq(f(x), (C1 + C2*x)*exp(-2*x) + (C3 + C4*x)*exp(2*x))
sol23 = Eq(f(x), (C1*sin(2*x) + C2*cos(2*x))*exp(x))
sol24 = Eq(f(x), (C1*sin(x*sqrt(3)/2) + C2*cos(x*sqrt(3)/2))*exp(x/2))
sol25 = Eq(f(x),
C1*cos(x*sqrt(3)) + C2*sin(x*sqrt(3)) + C3*sin(x*sqrt(2)) +
C4*cos(x*sqrt(2)))
sol26 = Eq(f(x), (C1*sin(4*x) + C2*cos(4*x))*exp(2*x))
sol27 = Eq(f(x), (C1 + C2*x)*sin(x*sqrt(2)) + (C3 + C4*x)*cos(x*sqrt(2)))
sol28 = Eq(f(x),
(C1*sin(x*sqrt(3)) + C2*cos(x*sqrt(3)))*exp(x) + C3*exp(-2*x))
sol29 = Eq(f(x), C1 + C2*sin(2*x) + C3*cos(2*x) + C4*x)
sol30 = Eq(f(x), C1 + (C2 + C3*x)*sin(x) + (C4 + C5*x)*cos(x))
sol31 = Eq(f(x), (C1*sin(sqrt(3)*x/2) + C2*cos(sqrt(3)*x/2))/sqrt(exp(x))
+ (C3*sin(sqrt(3)*x/2) + C4*cos(sqrt(3)*x/2))*sqrt(exp(x)))
sol32 = Eq(f(x), C1*sin(x*sqrt(-sqrt(3) + 2)) + C2*sin(x*sqrt(sqrt(3) + 2))
+ C3*cos(x*sqrt(-sqrt(3) + 2)) + C4*cos(x*sqrt(sqrt(3) + 2)))
sol1s = constant_renumber(sol1, 'C', 1, 2)
sol2s = constant_renumber(sol2, 'C', 1, 2)
sol3s = constant_renumber(sol3, 'C', 1, 2)
sol4s = constant_renumber(sol4, 'C', 1, 3)
sol5s = constant_renumber(sol5, 'C', 1, 2)
sol6s = constant_renumber(sol6, 'C', 1, 2)
sol7s = constant_renumber(sol7, 'C', 1, 3)
sol8s = constant_renumber(sol8, 'C', 1, 4)
sol9s = constant_renumber(sol9, 'C', 1, 4)
sol10s = constant_renumber(sol10, 'C', 1, 4)
sol11s = constant_renumber(sol11, 'C', 1, 2)
sol12s = constant_renumber(sol12, 'C', 1, 2)
sol13s = constant_renumber(sol13, 'C', 1, 4)
sol14s = constant_renumber(sol14, 'C', 1, 2)
sol15s = constant_renumber(sol15, 'C', 1, 3)
sol16s = constant_renumber(sol16, 'C', 1, 3)
sol17s = constant_renumber(sol17, 'C', 1, 2)
sol18s = constant_renumber(sol18, 'C', 1, 4)
sol19s = constant_renumber(sol19, 'C', 1, 4)
sol20s = constant_renumber(sol20, 'C', 1, 4)
sol21s = constant_renumber(sol21, 'C', 1, 4)
sol22s = constant_renumber(sol22, 'C', 1, 4)
sol23s = constant_renumber(sol23, 'C', 1, 2)
sol24s = constant_renumber(sol24, 'C', 1, 2)
sol25s = constant_renumber(sol25, 'C', 1, 4)
sol26s = constant_renumber(sol26, 'C', 1, 2)
sol27s = constant_renumber(sol27, 'C', 1, 4)
sol28s = constant_renumber(sol28, 'C', 1, 3)
sol29s = constant_renumber(sol29, 'C', 1, 4)
sol30s = constant_renumber(sol30, 'C', 1, 5)
assert dsolve(eq1) in (sol1, sol1s)
assert dsolve(eq2) in (sol2, sol2s)
assert dsolve(eq3) in (sol3, sol3s)
assert dsolve(eq4) in (sol4, sol4s)
assert dsolve(eq5) in (sol5, sol5s)
assert dsolve(eq6) in (sol6, sol6s)
assert dsolve(eq7) in (sol7, sol7s)
assert dsolve(eq8) in (sol8, sol8s)
assert dsolve(eq9) in (sol9, sol9s)
assert dsolve(eq10) in (sol10, sol10s)
assert dsolve(eq11) in (sol11, sol11s)
assert dsolve(eq12) in (sol12, sol12s)
assert dsolve(eq13) in (sol13, sol13s)
assert dsolve(eq14) in (sol14, sol14s)
assert dsolve(eq15) in (sol15, sol15s)
assert dsolve(eq16) in (sol16, sol16s)
assert dsolve(eq17) in (sol17, sol17s)
assert dsolve(eq18) in (sol18, sol18s)
assert dsolve(eq19) in (sol19, sol19s)
assert dsolve(eq20) in (sol20, sol20s)
assert dsolve(eq21) in (sol21, sol21s)
assert dsolve(eq22) in (sol22, sol22s)
assert dsolve(eq23) in (sol23, sol23s)
assert dsolve(eq24) in (sol24, sol24s)
assert dsolve(eq25) in (sol25, sol25s)
assert dsolve(eq26) in (sol26, sol26s)
assert dsolve(eq27) in (sol27, sol27s)
assert dsolve(eq28) in (sol28, sol28s)
assert dsolve(eq29) in (sol29, sol29s)
assert dsolve(eq30) in (sol30, sol30s)
assert dsolve(eq31) in (sol31,)
assert dsolve(eq32) in (sol32,)
assert checkodesol(eq1, sol1, order=2, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=2, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=2, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=3, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=2, solve_for_func=False)[0]
assert checkodesol(eq7, sol7, order=3, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=4, solve_for_func=False)[0]
assert checkodesol(eq9, sol9, order=4, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=4, solve_for_func=False)[0]
assert checkodesol(eq11, sol11, order=2, solve_for_func=False)[0]
assert checkodesol(eq12, sol12, order=2, solve_for_func=False)[0]
assert checkodesol(eq13, sol13, order=4, solve_for_func=False)[0]
assert checkodesol(eq14, sol14, order=2, solve_for_func=False)[0]
assert checkodesol(eq15, sol15, order=3, solve_for_func=False)[0]
assert checkodesol(eq16, sol16, order=3, solve_for_func=False)[0]
assert checkodesol(eq17, sol17, order=2, solve_for_func=False)[0]
assert checkodesol(eq18, sol18, order=4, solve_for_func=False)[0]
assert checkodesol(eq19, sol19, order=4, solve_for_func=False)[0]
assert checkodesol(eq20, sol20, order=4, solve_for_func=False)[0]
assert checkodesol(eq21, sol21, order=4, solve_for_func=False)[0]
assert checkodesol(eq22, sol22, order=4, solve_for_func=False)[0]
assert checkodesol(eq23, sol23, order=2, solve_for_func=False)[0]
assert checkodesol(eq24, sol24, order=2, solve_for_func=False)[0]
assert checkodesol(eq25, sol25, order=4, solve_for_func=False)[0]
assert checkodesol(eq26, sol26, order=2, solve_for_func=False)[0]
assert checkodesol(eq27, sol27, order=4, solve_for_func=False)[0]
assert checkodesol(eq28, sol28, order=3, solve_for_func=False)[0]
assert checkodesol(eq29, sol29, order=4, solve_for_func=False)[0]
assert checkodesol(eq30, sol30, order=5, solve_for_func=False)[0]
assert checkodesol(eq31, sol31, order=4, solve_for_func=False)[0]
assert checkodesol(eq32, sol32, order=4, solve_for_func=False)[0]
# Issue #15237
eqn = Derivative(x*f(x), x, x, x)
hint = 'nth_linear_constant_coeff_homogeneous'
raises(ValueError, lambda: dsolve(eqn, f(x), hint, prep=True))
raises(ValueError, lambda: dsolve(eqn, f(x), hint, prep=False))
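# Hedged aside: these solutions come from the roots of the characteristic
# polynomial; e.g. for eq2 above, m**2 - 3*m + 2 = (m - 1)*(m - 2) gives the
# basis {exp(x), exp(2*x)}.
def _example_characteristic_polynomial():
    from sympy import Symbol, roots
    m_ = Symbol('m')
    assert roots(m_**2 - 3*m_ + 2, m_) == {1: 1, 2: 1}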
def test_nth_linear_constant_coeff_homogeneous_rootof():
eq = f(x).diff(x, 5) + 11*f(x).diff(x) - 2*f(x)
sol = Eq(f(x),
C1*exp(x*rootof(x**5 + 11*x - 2, 0)) +
C2*exp(x*rootof(x**5 + 11*x - 2, 1)) +
C3*exp(x*rootof(x**5 + 11*x - 2, 2)) +
C4*exp(x*rootof(x**5 + 11*x - 2, 3)) +
C5*exp(x*rootof(x**5 + 11*x - 2, 4)))
assert dsolve(eq) == sol
eq = f(x).diff(x, 6) - 6*f(x).diff(x, 5) + 5*f(x).diff(x, 4) + 10*f(x).diff(x) - 50 * f(x)
sol = Eq(f(x),
C1*exp(5*x)
+ C2*exp(x*rootof(x**5 - x**4 + 10, 0))
+ C3*exp(x*rootof(x**5 - x**4 + 10, 1))
+ C4*exp(x*rootof(x**5 - x**4 + 10, 2))
+ C5*exp(x*rootof(x**5 - x**4 + 10, 3))
+ C6*exp(x*rootof(x**5 - x**4 + 10, 4))
)
assert dsolve(eq) == sol
def test_nth_linear_constant_coeff_homogeneous_irrational():
    our_hint = 'nth_linear_constant_coeff_homogeneous'
    eq = Eq(sqrt(2)*f(x).diff(x, x, x) + f(x).diff(x), 0)
sol = Eq(f(x), C1 + C2*sin(2**(S(3)/4)*x/2) + C3*cos(2**(S(3)/4)*x/2))
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint) == sol
assert dsolve(eq, f(x)) == sol
assert checkodesol(eq, sol, order=3, solve_for_func=False)[0]
E = exp(1)
    eq = Eq(E*f(x).diff(x, x, x) + f(x).diff(x), 0)
sol = Eq(f(x), C1 + C2*sin(x/sqrt(E)) + C3*cos(x/sqrt(E)))
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint) == sol
assert dsolve(eq, f(x)) == sol
assert checkodesol(eq, sol, order=3, solve_for_func=False)[0]
    eq = Eq(pi*f(x).diff(x, x, x) + f(x).diff(x), 0)
sol = Eq(f(x), C1 + C2*sin(x/sqrt(pi)) + C3*cos(x/sqrt(pi)))
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint) == sol
assert dsolve(eq, f(x)) == sol
assert checkodesol(eq, sol, order=3, solve_for_func=False)[0]
    eq = Eq(I*f(x).diff(x, x, x) + f(x).diff(x), 0)
sol = Eq(f(x), C1 + C2*exp(-sqrt(I)*x) + C3*exp(sqrt(I)*x))
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint) == sol
assert dsolve(eq, f(x)) == sol
assert checkodesol(eq, sol, order=3, solve_for_func=False)[0]
@XFAIL
@slow
def test_nth_linear_constant_coeff_homogeneous_rootof_sol():
if ON_TRAVIS:
skip("Too slow for travis.")
eq = f(x).diff(x, 5) + 11*f(x).diff(x) - 2*f(x)
sol = Eq(f(x),
C1*exp(x*rootof(x**5 + 11*x - 2, 0)) +
C2*exp(x*rootof(x**5 + 11*x - 2, 1)) +
C3*exp(x*rootof(x**5 + 11*x - 2, 2)) +
C4*exp(x*rootof(x**5 + 11*x - 2, 3)) +
C5*exp(x*rootof(x**5 + 11*x - 2, 4)))
assert checkodesol(eq, sol, order=5, solve_for_func=False)[0]
@XFAIL
def test_noncircularized_real_imaginary_parts():
# If this passes, lines numbered 3878-3882 (at the time of this commit)
# of sympy/solvers/ode.py for nth_linear_constant_coeff_homogeneous
# should be removed.
y = sqrt(1+x)
i, r = im(y), re(y)
assert not (i.has(atan2) and r.has(atan2))
@XFAIL
def test_collect_respecting_exponentials():
# If this test passes, lines 1306-1311 (at the time of this commit)
# of sympy/solvers/ode.py should be removed.
sol = 1 + exp(x/2)
assert sol == collect( sol, exp(x/3))
def test_undetermined_coefficients_match():
assert _undetermined_coefficients_match(g(x), x) == {'test': False}
assert _undetermined_coefficients_match(sin(2*x + sqrt(5)), x) == \
{'test': True, 'trialset':
set([cos(2*x + sqrt(5)), sin(2*x + sqrt(5))])}
assert _undetermined_coefficients_match(sin(x)*cos(x), x) == \
{'test': False}
s = set([cos(x), x*cos(x), x**2*cos(x), x**2*sin(x), x*sin(x), sin(x)])
assert _undetermined_coefficients_match(sin(x)*(x**2 + x + 1), x) == \
{'test': True, 'trialset': s}
assert _undetermined_coefficients_match(
sin(x)*x**2 + sin(x)*x + sin(x), x) == {'test': True, 'trialset': s}
assert _undetermined_coefficients_match(
exp(2*x)*sin(x)*(x**2 + x + 1), x
) == {
'test': True, 'trialset': set([exp(2*x)*sin(x), x**2*exp(2*x)*sin(x),
cos(x)*exp(2*x), x**2*cos(x)*exp(2*x), x*cos(x)*exp(2*x),
x*exp(2*x)*sin(x)])}
assert _undetermined_coefficients_match(1/sin(x), x) == {'test': False}
assert _undetermined_coefficients_match(log(x), x) == {'test': False}
    assert _undetermined_coefficients_match(2**x*(x**2 + x + 1), x) == \
{'test': True, 'trialset': set([2**x, x*2**x, x**2*2**x])}
assert _undetermined_coefficients_match(x**y, x) == {'test': False}
assert _undetermined_coefficients_match(exp(x)*exp(2*x + 1), x) == \
{'test': True, 'trialset': set([exp(1 + 3*x)])}
assert _undetermined_coefficients_match(sin(x)*(x**2 + x + 1), x) == \
{'test': True, 'trialset': set([x*cos(x), x*sin(x), x**2*cos(x),
x**2*sin(x), cos(x), sin(x)])}
assert _undetermined_coefficients_match(sin(x)*(x + sin(x)), x) == \
{'test': False}
assert _undetermined_coefficients_match(sin(x)*(x + sin(2*x)), x) == \
{'test': False}
assert _undetermined_coefficients_match(sin(x)*tan(x), x) == \
{'test': False}
assert _undetermined_coefficients_match(
x**2*sin(x)*exp(x) + x*sin(x) + x, x
) == {
'test': True, 'trialset': set([x**2*cos(x)*exp(x), x, cos(x), S(1),
exp(x)*sin(x), sin(x), x*exp(x)*sin(x), x*cos(x), x*cos(x)*exp(x),
x*sin(x), cos(x)*exp(x), x**2*exp(x)*sin(x)])}
assert _undetermined_coefficients_match(4*x*sin(x - 2), x) == {
'trialset': set([x*cos(x - 2), x*sin(x - 2), cos(x - 2), sin(x - 2)]),
'test': True,
}
assert _undetermined_coefficients_match(2**x*x, x) == \
{'test': True, 'trialset': set([2**x, x*2**x])}
assert _undetermined_coefficients_match(2**x*exp(2*x), x) == \
{'test': True, 'trialset': set([2**x*exp(2*x)])}
assert _undetermined_coefficients_match(exp(-x)/x, x) == \
{'test': False}
# Below are from Ordinary Differential Equations,
# Tenenbaum and Pollard, pg. 231
assert _undetermined_coefficients_match(S(4), x) == \
{'test': True, 'trialset': set([S(1)])}
assert _undetermined_coefficients_match(12*exp(x), x) == \
{'test': True, 'trialset': set([exp(x)])}
assert _undetermined_coefficients_match(exp(I*x), x) == \
{'test': True, 'trialset': set([exp(I*x)])}
assert _undetermined_coefficients_match(sin(x), x) == \
{'test': True, 'trialset': set([cos(x), sin(x)])}
assert _undetermined_coefficients_match(cos(x), x) == \
{'test': True, 'trialset': set([cos(x), sin(x)])}
assert _undetermined_coefficients_match(8 + 6*exp(x) + 2*sin(x), x) == \
{'test': True, 'trialset': set([S(1), cos(x), sin(x), exp(x)])}
assert _undetermined_coefficients_match(x**2, x) == \
{'test': True, 'trialset': set([S(1), x, x**2])}
assert _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) == \
{'test': True, 'trialset': set([x*exp(x), exp(x), exp(-x)])}
assert _undetermined_coefficients_match(2*exp(2*x)*sin(x), x) == \
{'test': True, 'trialset': set([exp(2*x)*sin(x), cos(x)*exp(2*x)])}
assert _undetermined_coefficients_match(x - sin(x), x) == \
{'test': True, 'trialset': set([S(1), x, cos(x), sin(x)])}
assert _undetermined_coefficients_match(x**2 + 2*x, x) == \
{'test': True, 'trialset': set([S(1), x, x**2])}
assert _undetermined_coefficients_match(4*x*sin(x), x) == \
{'test': True, 'trialset': set([x*cos(x), x*sin(x), cos(x), sin(x)])}
assert _undetermined_coefficients_match(x*sin(2*x), x) == \
{'test': True, 'trialset':
set([x*cos(2*x), x*sin(2*x), cos(2*x), sin(2*x)])}
assert _undetermined_coefficients_match(x**2*exp(-x), x) == \
{'test': True, 'trialset': set([x*exp(-x), x**2*exp(-x), exp(-x)])}
assert _undetermined_coefficients_match(2*exp(-x) - x**2*exp(-x), x) == \
{'test': True, 'trialset': set([x*exp(-x), x**2*exp(-x), exp(-x)])}
assert _undetermined_coefficients_match(exp(-2*x) + x**2, x) == \
{'test': True, 'trialset': set([S(1), x, x**2, exp(-2*x)])}
assert _undetermined_coefficients_match(x*exp(-x), x) == \
{'test': True, 'trialset': set([x*exp(-x), exp(-x)])}
assert _undetermined_coefficients_match(x + exp(2*x), x) == \
{'test': True, 'trialset': set([S(1), x, exp(2*x)])}
assert _undetermined_coefficients_match(sin(x) + exp(-x), x) == \
{'test': True, 'trialset': set([cos(x), sin(x), exp(-x)])}
assert _undetermined_coefficients_match(exp(x), x) == \
{'test': True, 'trialset': set([exp(x)])}
# converted from sin(x)**2
assert _undetermined_coefficients_match(S(1)/2 - cos(2*x)/2, x) == \
{'test': True, 'trialset': set([S(1), cos(2*x), sin(2*x)])}
# converted from exp(2*x)*sin(x)**2
assert _undetermined_coefficients_match(
exp(2*x)*(S(1)/2 + cos(2*x)/2), x
) == {
'test': True, 'trialset': set([exp(2*x)*sin(2*x), cos(2*x)*exp(2*x),
exp(2*x)])}
assert _undetermined_coefficients_match(2*x + sin(x) + cos(x), x) == \
{'test': True, 'trialset': set([S(1), x, cos(x), sin(x)])}
# converted from sin(2*x)*sin(x)
assert _undetermined_coefficients_match(cos(x)/2 - cos(3*x)/2, x) == \
{'test': True, 'trialset': set([cos(x), cos(3*x), sin(x), sin(3*x)])}
assert _undetermined_coefficients_match(cos(x**2), x) == {'test': False}
assert _undetermined_coefficients_match(2**(x**2), x) == {'test': False}
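# Hedged sketch of what the trialset is: the closure of the inhomogeneity
# under differentiation, up to constant multiples; differentiating x*exp(x)
# repeatedly only ever yields combinations of x*exp(x) and exp(x).
def _example_trialset_closure():
    from sympy import Symbol, exp
    from sympy.solvers.ode import _undetermined_coefficients_match
    x_ = Symbol('x')
    match = _undetermined_coefficients_match(x_*exp(x_), x_)
    assert match == {'test': True, 'trialset': set([x_*exp(x_), exp(x_)])}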
@slow
def test_nth_linear_constant_coeff_undetermined_coefficients():
hint = 'nth_linear_constant_coeff_undetermined_coefficients'
g = exp(-x)
f2 = f(x).diff(x, 2)
c = 3*f(x).diff(x, 3) + 5*f2 + f(x).diff(x) - f(x) - x
eq1 = c - x*g
eq2 = c - g
# 3-27 below are from Ordinary Differential Equations,
# Tenenbaum and Pollard, pg. 231
eq3 = f2 + 3*f(x).diff(x) + 2*f(x) - 4
eq4 = f2 + 3*f(x).diff(x) + 2*f(x) - 12*exp(x)
eq5 = f2 + 3*f(x).diff(x) + 2*f(x) - exp(I*x)
eq6 = f2 + 3*f(x).diff(x) + 2*f(x) - sin(x)
eq7 = f2 + 3*f(x).diff(x) + 2*f(x) - cos(x)
eq8 = f2 + 3*f(x).diff(x) + 2*f(x) - (8 + 6*exp(x) + 2*sin(x))
eq9 = f2 + f(x).diff(x) + f(x) - x**2
eq10 = f2 - 2*f(x).diff(x) - 8*f(x) - 9*x*exp(x) - 10*exp(-x)
eq11 = f2 - 3*f(x).diff(x) - 2*exp(2*x)*sin(x)
eq12 = f(x).diff(x, 4) - 2*f2 + f(x) - x + sin(x)
eq13 = f2 + f(x).diff(x) - x**2 - 2*x
eq14 = f2 + f(x).diff(x) - x - sin(2*x)
eq15 = f2 + f(x) - 4*x*sin(x)
eq16 = f2 + 4*f(x) - x*sin(2*x)
eq17 = f2 + 2*f(x).diff(x) + f(x) - x**2*exp(-x)
eq18 = f(x).diff(x, 3) + 3*f2 + 3*f(x).diff(x) + f(x) - 2*exp(-x) + \
x**2*exp(-x)
eq19 = f2 + 3*f(x).diff(x) + 2*f(x) - exp(-2*x) - x**2
eq20 = f2 - 3*f(x).diff(x) + 2*f(x) - x*exp(-x)
eq21 = f2 + f(x).diff(x) - 6*f(x) - x - exp(2*x)
eq22 = f2 + f(x) - sin(x) - exp(-x)
eq23 = f(x).diff(x, 3) - 3*f2 + 3*f(x).diff(x) - f(x) - exp(x)
# sin(x)**2
eq24 = f2 + f(x) - S(1)/2 - cos(2*x)/2
# exp(2*x)*sin(x)**2
eq25 = f(x).diff(x, 3) - f(x).diff(x) - exp(2*x)*(S(1)/2 - cos(2*x)/2)
eq26 = (f(x).diff(x, 5) + 2*f(x).diff(x, 3) + f(x).diff(x) - 2*x -
sin(x) - cos(x))
# sin(2*x)*sin(x), skip 3127 for now, match bug
eq27 = f2 + f(x) - cos(x)/2 + cos(3*x)/2
eq28 = f(x).diff(x) - 1
sol1 = Eq(f(x),
-1 - x + (C1 + C2*x - 3*x**2/32 - x**3/24)*exp(-x) + C3*exp(x/3))
sol2 = Eq(f(x), -1 - x + (C1 + C2*x - x**2/8)*exp(-x) + C3*exp(x/3))
sol3 = Eq(f(x), 2 + C1*exp(-x) + C2*exp(-2*x))
sol4 = Eq(f(x), 2*exp(x) + C1*exp(-x) + C2*exp(-2*x))
sol5 = Eq(f(x), C1*exp(-2*x) + C2*exp(-x) + exp(I*x)/10 - 3*I*exp(I*x)/10)
sol6 = Eq(f(x), -3*cos(x)/10 + sin(x)/10 + C1*exp(-x) + C2*exp(-2*x))
sol7 = Eq(f(x), cos(x)/10 + 3*sin(x)/10 + C1*exp(-x) + C2*exp(-2*x))
sol8 = Eq(f(x),
4 - 3*cos(x)/5 + sin(x)/5 + exp(x) + C1*exp(-x) + C2*exp(-2*x))
sol9 = Eq(f(x),
-2*x + x**2 + (C1*sin(x*sqrt(3)/2) + C2*cos(x*sqrt(3)/2))*exp(-x/2))
sol10 = Eq(f(x), -x*exp(x) - 2*exp(-x) + C1*exp(-2*x) + C2*exp(4*x))
sol11 = Eq(f(x), C1 + C2*exp(3*x) + (-3*sin(x) - cos(x))*exp(2*x)/5)
sol12 = Eq(f(x), x - sin(x)/4 + (C1 + C2*x)*exp(-x) + (C3 + C4*x)*exp(x))
sol13 = Eq(f(x), C1 + x**3/3 + C2*exp(-x))
sol14 = Eq(f(x), C1 - x - sin(2*x)/5 - cos(2*x)/10 + x**2/2 + C2*exp(-x))
sol15 = Eq(f(x), (C1 + x)*sin(x) + (C2 - x**2)*cos(x))
sol16 = Eq(f(x), (C1 + x/16)*sin(2*x) + (C2 - x**2/8)*cos(2*x))
sol17 = Eq(f(x), (C1 + C2*x + x**4/12)*exp(-x))
sol18 = Eq(f(x), (C1 + C2*x + C3*x**2 - x**5/60 + x**3/3)*exp(-x))
sol19 = Eq(f(x), S(7)/4 - 3*x/2 + x**2/2 + C1*exp(-x) + (C2 - x)*exp(-2*x))
sol20 = Eq(f(x), C1*exp(x) + C2*exp(2*x) + (6*x + 5)*exp(-x)/36)
sol21 = Eq(f(x), -S(1)/36 - x/6 + C1*exp(-3*x) + (C2 + x/5)*exp(2*x))
sol22 = Eq(f(x), C1*sin(x) + (C2 - x/2)*cos(x) + exp(-x)/2)
sol23 = Eq(f(x), (C1 + C2*x + C3*x**2 + x**3/6)*exp(x))
sol24 = Eq(f(x), S(1)/2 - cos(2*x)/6 + C1*sin(x) + C2*cos(x))
sol25 = Eq(f(x), C1 + C2*exp(-x) + C3*exp(x) +
(-21*sin(2*x) + 27*cos(2*x) + 130)*exp(2*x)/1560)
sol26 = Eq(f(x),
C1 + (C2 + C3*x - x**2/8)*sin(x) + (C4 + C5*x + x**2/8)*cos(x) + x**2)
sol27 = Eq(f(x), cos(3*x)/16 + C1*cos(x) + (C2 + x/4)*sin(x))
sol28 = Eq(f(x), C1 + x)
sol1s = constant_renumber(sol1, 'C', 1, 3)
sol2s = constant_renumber(sol2, 'C', 1, 3)
sol3s = constant_renumber(sol3, 'C', 1, 2)
sol4s = constant_renumber(sol4, 'C', 1, 2)
sol5s = constant_renumber(sol5, 'C', 1, 2)
sol6s = constant_renumber(sol6, 'C', 1, 2)
sol7s = constant_renumber(sol7, 'C', 1, 2)
sol8s = constant_renumber(sol8, 'C', 1, 2)
sol9s = constant_renumber(sol9, 'C', 1, 2)
sol10s = constant_renumber(sol10, 'C', 1, 2)
sol11s = constant_renumber(sol11, 'C', 1, 2)
sol12s = constant_renumber(sol12, 'C', 1, 2)
sol13s = constant_renumber(sol13, 'C', 1, 4)
sol14s = constant_renumber(sol14, 'C', 1, 2)
sol15s = constant_renumber(sol15, 'C', 1, 2)
sol16s = constant_renumber(sol16, 'C', 1, 2)
sol17s = constant_renumber(sol17, 'C', 1, 2)
sol18s = constant_renumber(sol18, 'C', 1, 3)
sol19s = constant_renumber(sol19, 'C', 1, 2)
sol20s = constant_renumber(sol20, 'C', 1, 2)
sol21s = constant_renumber(sol21, 'C', 1, 2)
sol22s = constant_renumber(sol22, 'C', 1, 2)
sol23s = constant_renumber(sol23, 'C', 1, 3)
sol24s = constant_renumber(sol24, 'C', 1, 2)
sol25s = constant_renumber(sol25, 'C', 1, 3)
sol26s = constant_renumber(sol26, 'C', 1, 5)
sol27s = constant_renumber(sol27, 'C', 1, 2)
assert dsolve(eq1, hint=hint) in (sol1, sol1s)
assert dsolve(eq2, hint=hint) in (sol2, sol2s)
assert dsolve(eq3, hint=hint) in (sol3, sol3s)
assert dsolve(eq4, hint=hint) in (sol4, sol4s)
assert dsolve(eq5, hint=hint) in (sol5, sol5s)
assert dsolve(eq6, hint=hint) in (sol6, sol6s)
assert dsolve(eq7, hint=hint) in (sol7, sol7s)
assert dsolve(eq8, hint=hint) in (sol8, sol8s)
assert dsolve(eq9, hint=hint) in (sol9, sol9s)
assert dsolve(eq10, hint=hint) in (sol10, sol10s)
assert dsolve(eq11, hint=hint) in (sol11, sol11s)
assert dsolve(eq12, hint=hint) in (sol12, sol12s)
assert dsolve(eq13, hint=hint) in (sol13, sol13s)
assert dsolve(eq14, hint=hint) in (sol14, sol14s)
assert dsolve(eq15, hint=hint) in (sol15, sol15s)
assert dsolve(eq16, hint=hint) in (sol16, sol16s)
assert dsolve(eq17, hint=hint) in (sol17, sol17s)
assert dsolve(eq18, hint=hint) in (sol18, sol18s)
assert dsolve(eq19, hint=hint) in (sol19, sol19s)
assert dsolve(eq20, hint=hint) in (sol20, sol20s)
assert dsolve(eq21, hint=hint) in (sol21, sol21s)
assert dsolve(eq22, hint=hint) in (sol22, sol22s)
assert dsolve(eq23, hint=hint) in (sol23, sol23s)
assert dsolve(eq24, hint=hint) in (sol24, sol24s)
assert dsolve(eq25, hint=hint) in (sol25, sol25s)
assert dsolve(eq26, hint=hint) in (sol26, sol26s)
assert dsolve(eq27, hint=hint) in (sol27, sol27s)
assert dsolve(eq28, hint=hint) == sol28
assert checkodesol(eq1, sol1, order=3, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=3, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=2, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=2, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=2, solve_for_func=False)[0]
assert checkodesol(eq7, sol7, order=2, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=2, solve_for_func=False)[0]
assert checkodesol(eq9, sol9, order=2, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=2, solve_for_func=False)[0]
assert checkodesol(eq11, sol11, order=2, solve_for_func=False)[0]
assert checkodesol(eq12, sol12, order=4, solve_for_func=False)[0]
assert checkodesol(eq13, sol13, order=2, solve_for_func=False)[0]
assert checkodesol(eq14, sol14, order=2, solve_for_func=False)[0]
assert checkodesol(eq15, sol15, order=2, solve_for_func=False)[0]
assert checkodesol(eq16, sol16, order=2, solve_for_func=False)[0]
assert checkodesol(eq17, sol17, order=2, solve_for_func=False)[0]
assert checkodesol(eq18, sol18, order=3, solve_for_func=False)[0]
assert checkodesol(eq19, sol19, order=2, solve_for_func=False)[0]
assert checkodesol(eq20, sol20, order=2, solve_for_func=False)[0]
assert checkodesol(eq21, sol21, order=2, solve_for_func=False)[0]
assert checkodesol(eq22, sol22, order=2, solve_for_func=False)[0]
assert checkodesol(eq23, sol23, order=3, solve_for_func=False)[0]
assert checkodesol(eq24, sol24, order=2, solve_for_func=False)[0]
assert checkodesol(eq25, sol25, order=3, solve_for_func=False)[0]
assert checkodesol(eq26, sol26, order=5, solve_for_func=False)[0]
assert checkodesol(eq27, sol27, order=2, solve_for_func=False)[0]
assert checkodesol(eq28, sol28, order=1, solve_for_func=False)[0]
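# A hedged usage sketch for the hint itself: a simple forcing term whose
# particular solution is a polynomial, verified with checkodesol rather than
# against a hard-coded output form.
def _example_undetermined_coefficients_usage():
    from sympy import Function, Symbol, dsolve, checkodesol
    x_ = Symbol('x')
    g_ = Function('g')
    eq = g_(x_).diff(x_, 2) + g_(x_) - x_    # particular solution: g = x
    sol = dsolve(eq, g_(x_),
                 hint='nth_linear_constant_coeff_undetermined_coefficients')
    assert checkodesol(eq, sol, order=2)[0]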
def test_issue_5787():
    # This test case shows that equations with imaginary coefficients are
    # classified under nth_linear_constant_coeff_undetermined_coefficients
eq = Eq(diff(f(x), x), I*f(x) + S(1)/2 - I)
out_hint = 'nth_linear_constant_coeff_undetermined_coefficients'
assert out_hint in classify_ode(eq)
@XFAIL
def test_nth_linear_constant_coeff_undetermined_coefficients_imaginary_exp():
# Equivalent to eq26 in
# test_nth_linear_constant_coeff_undetermined_coefficients above.
    # This fails because the undetermined coefficients algorithm does not
    # know to multiply exp(I*x) by a sufficient power of x, since exp(I*x)
    # is linearly dependent on sin(x) and cos(x).
hint = 'nth_linear_constant_coeff_undetermined_coefficients'
eq26a = f(x).diff(x, 5) + 2*f(x).diff(x, 3) + f(x).diff(x) - 2*x - exp(I*x)
sol26 = Eq(f(x),
C1 + (C2 + C3*x - x**2/8)*sin(x) + (C4 + C5*x + x**2/8)*cos(x) + x**2)
assert dsolve(eq26a, hint=hint) == sol26
assert checkodesol(eq26a, sol26, order=5, solve_for_func=False)[0]
@slow
def test_nth_linear_constant_coeff_variation_of_parameters():
hint = 'nth_linear_constant_coeff_variation_of_parameters'
g = exp(-x)
f2 = f(x).diff(x, 2)
c = 3*f(x).diff(x, 3) + 5*f2 + f(x).diff(x) - f(x) - x
eq1 = c - x*g
eq2 = c - g
eq3 = f(x).diff(x) - 1
eq4 = f2 + 3*f(x).diff(x) + 2*f(x) - 4
eq5 = f2 + 3*f(x).diff(x) + 2*f(x) - 12*exp(x)
eq6 = f2 - 2*f(x).diff(x) - 8*f(x) - 9*x*exp(x) - 10*exp(-x)
eq7 = f2 + 2*f(x).diff(x) + f(x) - x**2*exp(-x)
eq8 = f2 - 3*f(x).diff(x) + 2*f(x) - x*exp(-x)
eq9 = f(x).diff(x, 3) - 3*f2 + 3*f(x).diff(x) - f(x) - exp(x)
eq10 = f2 + 2*f(x).diff(x) + f(x) - exp(-x)/x
    eq11 = f2 + f(x) - 1/(sin(x)*cos(x))
eq12 = f(x).diff(x, 4) - 1/x
sol1 = Eq(f(x),
-1 - x + (C1 + C2*x - 3*x**2/32 - x**3/24)*exp(-x) + C3*exp(x/3))
sol2 = Eq(f(x), -1 - x + (C1 + C2*x - x**2/8)*exp(-x) + C3*exp(x/3))
sol3 = Eq(f(x), C1 + x)
sol4 = Eq(f(x), 2 + C1*exp(-x) + C2*exp(-2*x))
sol5 = Eq(f(x), 2*exp(x) + C1*exp(-x) + C2*exp(-2*x))
sol6 = Eq(f(x), -x*exp(x) - 2*exp(-x) + C1*exp(-2*x) + C2*exp(4*x))
sol7 = Eq(f(x), (C1 + C2*x + x**4/12)*exp(-x))
sol8 = Eq(f(x), C1*exp(x) + C2*exp(2*x) + (6*x + 5)*exp(-x)/36)
sol9 = Eq(f(x), (C1 + C2*x + C3*x**2 + x**3/6)*exp(x))
sol10 = Eq(f(x), (C1 + x*(C2 + log(x)))*exp(-x))
sol11 = Eq(f(x), cos(x)*(C2 - Integral(1/cos(x), x)) + sin(x)*(C1 +
Integral(1/sin(x), x)))
sol12 = Eq(f(x), C1 + C2*x + x**3*(C3 + log(x)/6) + C4*x**2)
sol1s = constant_renumber(sol1, 'C', 1, 3)
sol2s = constant_renumber(sol2, 'C', 1, 3)
sol3s = constant_renumber(sol3, 'C', 1, 2)
sol4s = constant_renumber(sol4, 'C', 1, 2)
sol5s = constant_renumber(sol5, 'C', 1, 2)
sol6s = constant_renumber(sol6, 'C', 1, 2)
sol7s = constant_renumber(sol7, 'C', 1, 2)
sol8s = constant_renumber(sol8, 'C', 1, 2)
sol9s = constant_renumber(sol9, 'C', 1, 3)
sol10s = constant_renumber(sol10, 'C', 1, 2)
sol11s = constant_renumber(sol11, 'C', 1, 2)
sol12s = constant_renumber(sol12, 'C', 1, 4)
assert dsolve(eq1, hint=hint) in (sol1, sol1s)
assert dsolve(eq2, hint=hint) in (sol2, sol2s)
assert dsolve(eq3, hint=hint) in (sol3, sol3s)
assert dsolve(eq4, hint=hint) in (sol4, sol4s)
assert dsolve(eq5, hint=hint) in (sol5, sol5s)
assert dsolve(eq6, hint=hint) in (sol6, sol6s)
assert dsolve(eq7, hint=hint) in (sol7, sol7s)
assert dsolve(eq8, hint=hint) in (sol8, sol8s)
assert dsolve(eq9, hint=hint) in (sol9, sol9s)
assert dsolve(eq10, hint=hint) in (sol10, sol10s)
assert dsolve(eq11, hint=hint + '_Integral') in (sol11, sol11s)
assert dsolve(eq12, hint=hint) in (sol12, sol12s)
assert checkodesol(eq1, sol1, order=3, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=3, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=1, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=2, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=2, solve_for_func=False)[0]
assert checkodesol(eq7, sol7, order=2, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=2, solve_for_func=False)[0]
assert checkodesol(eq9, sol9, order=3, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=2, solve_for_func=False)[0]
assert checkodesol(eq12, sol12, order=4, solve_for_func=False)[0]
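# Hedged aside: variation of parameters divides by the Wronskian of the
# homogeneous solutions, e.g. W(sin, cos)(x) = -sin(x)**2 - cos(x)**2 = -1.
def _example_wronskian():
    from sympy import Symbol, sin, cos, wronskian, simplify
    x_ = Symbol('x')
    assert simplify(wronskian([sin(x_), cos(x_)], x_)) == -1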
@slow
def test_nth_linear_constant_coeff_variation_of_parameters_simplify_False():
# solve_variation_of_parameters shouldn't attempt to simplify the
# Wronskian if simplify=False. If wronskian() ever gets good enough
# to simplify the result itself, this test might fail.
hint = 'nth_linear_constant_coeff_variation_of_parameters'
assert dsolve(f(x).diff(x, 5) + 2*f(x).diff(x, 3) + f(x).diff(x) -
2*x - exp(I*x), f(x), hint + "_Integral", simplify=False) != \
dsolve(f(x).diff(x, 5) + 2*f(x).diff(x, 3) + f(x).diff(x) -
2*x - exp(I*x), f(x), hint + "_Integral", simplify=True)
def test_Liouville_ODE():
hint = 'Liouville'
# The first part here used to be test_ODE_1() from test_solvers.py
eq1 = diff(f(x), x)/x + diff(f(x), x, x)/2 - diff(f(x), x)**2/2
eq1a = diff(x*exp(-f(x)), x, x)
# compare to test_unexpanded_Liouville_ODE() below
eq2 = (eq1*exp(-f(x))/exp(f(x))).expand()
eq3 = diff(f(x), x, x) + 1/f(x)*(diff(f(x), x))**2 + 1/x*diff(f(x), x)
eq4 = x*diff(f(x), x, x) + x/f(x)*diff(f(x), x)**2 + x*diff(f(x), x)
eq5 = Eq((x*exp(f(x))).diff(x, x), 0)
sol1 = Eq(f(x), log(x/(C1 + C2*x)))
sol1a = Eq(C1 + C2/x - exp(-f(x)), 0)
sol2 = sol1
sol3 = set(
[Eq(f(x), -sqrt(C1 + C2*log(x))),
Eq(f(x), sqrt(C1 + C2*log(x)))])
sol4 = set([Eq(f(x), sqrt(C1 + C2*exp(x))*exp(-x/2)),
Eq(f(x), -sqrt(C1 + C2*exp(x))*exp(-x/2))])
sol5 = Eq(f(x), log(C1 + C2/x))
sol1s = constant_renumber(sol1, 'C', 1, 2)
sol2s = constant_renumber(sol2, 'C', 1, 2)
sol3s = constant_renumber(sol3, 'C', 1, 2)
sol4s = constant_renumber(sol4, 'C', 1, 2)
sol5s = constant_renumber(sol5, 'C', 1, 2)
assert dsolve(eq1, hint=hint) in (sol1, sol1s)
assert dsolve(eq1a, hint=hint) in (sol1, sol1s)
assert dsolve(eq2, hint=hint) in (sol2, sol2s)
assert set(dsolve(eq3, hint=hint)) in (sol3, sol3s)
assert set(dsolve(eq4, hint=hint)) in (sol4, sol4s)
assert dsolve(eq5, hint=hint) in (sol5, sol5s)
assert checkodesol(eq1, sol1, order=2, solve_for_func=False)[0]
assert checkodesol(eq1a, sol1a, order=2, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=2, solve_for_func=False)[0]
assert all(i[0] for i in checkodesol(eq3, sol3, order=2,
solve_for_func=False))
assert all(i[0] for i in checkodesol(eq4, sol4, order=2,
solve_for_func=False))
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
not_Liouville1 = classify_ode(diff(f(x), x)/x + f(x)*diff(f(x), x, x)/2 -
diff(f(x), x)**2/2, f(x))
not_Liouville2 = classify_ode(diff(f(x), x)/x + diff(f(x), x, x)/2 -
x*diff(f(x), x)**2/2, f(x))
assert hint not in not_Liouville1
assert hint not in not_Liouville2
assert hint + '_Integral' not in not_Liouville1
assert hint + '_Integral' not in not_Liouville2
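# A hedged classification sketch: the Liouville solver matches equations of
# the form y'' + a(y)*y'**2 + b(x)*y' = 0; here a(y) = 1/y and b(x) = 1/x,
# the same shape as eq3 above.
def _example_liouville_classification():
    from sympy import Function, Symbol, classify_ode
    x_ = Symbol('x')
    g_ = Function('g')
    eq = g_(x_).diff(x_, 2) + g_(x_).diff(x_)**2/g_(x_) + g_(x_).diff(x_)/x_
    assert 'Liouville' in classify_ode(eq, g_(x_))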
def test_unexpanded_Liouville_ODE():
# This is the same as eq1 from test_Liouville_ODE() above.
eq1 = diff(f(x), x)/x + diff(f(x), x, x)/2 - diff(f(x), x)**2/2
eq2 = eq1*exp(-f(x))/exp(f(x))
sol2 = Eq(f(x), log(x/(C1 + C2*x)))
sol2s = constant_renumber(sol2, 'C', 1, 2)
assert dsolve(eq2) in (sol2, sol2s)
assert checkodesol(eq2, sol2, order=2, solve_for_func=False)[0]
def test_issue_4785():
from sympy.abc import A
eq = x + A*(x + diff(f(x), x) + f(x)) + diff(f(x), x) + f(x) + 2
assert classify_ode(eq, f(x)) == ('1st_linear', 'almost_linear',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'1st_linear_Integral', 'almost_linear_Integral',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
# issue 4864
eq = (x**2 + f(x)**2)*f(x).diff(x) - 2*x*f(x)
assert classify_ode(eq, f(x)) == ('1st_exact',
'1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series',
'lie_group', '1st_exact_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
def test_issue_4825():
raises(ValueError, lambda: dsolve(f(x, y).diff(x) - y*f(x, y), f(x)))
assert classify_ode(f(x, y).diff(x) - y*f(x, y), f(x), dict=True) == \
{'default': None, 'order': 0}
# See also issue 3793, test Z13.
raises(ValueError, lambda: dsolve(f(x).diff(x), f(y)))
assert classify_ode(f(x).diff(x), f(y), dict=True) == \
{'default': None, 'order': 0}
def test_constant_renumber_order_issue_5308():
from sympy.utilities.iterables import variations
assert constant_renumber(C1*x + C2*y, "C", 1, 2) == \
constant_renumber(C1*y + C2*x, "C", 1, 2) == \
C1*x + C2*y
e = C1*(C2 + x)*(C3 + y)
for a, b, c in variations([C1, C2, C3], 3):
assert constant_renumber(a*(b + x)*(c + y), "C", 1, 3) == e
def test_issue_5770():
k = Symbol("k", real=True)
t = Symbol('t')
w = Function('w')
sol = dsolve(w(t).diff(t, 6) - k**6*w(t), w(t))
assert len([s for s in sol.free_symbols if s.name.startswith('C')]) == 6
assert constantsimp((C1*cos(x) + C2*cos(x))*exp(x), set([C1, C2])) == \
C1*cos(x)*exp(x)
assert constantsimp(C1*cos(x) + C2*cos(x) + C3*sin(x), set([C1, C2, C3])) == \
C1*cos(x) + C3*sin(x)
assert constantsimp(exp(C1 + x), set([C1])) == C1*exp(x)
assert constantsimp(x + C1 + y, set([C1, y])) == C1 + x
assert constantsimp(x + C1 + Integral(x, (x, 1, 2)), set([C1])) == C1 + x
def test_issue_5112_5430():
assert homogeneous_order(-log(x) + acosh(x), x) is None
assert homogeneous_order(y - log(x), x, y) is None
def test_nth_order_linear_euler_eq_homogeneous():
x, t, a, b, c = symbols('x t a b c')
y = Function('y')
our_hint = "nth_linear_euler_eq_homogeneous"
eq = diff(f(t), t, 4)*t**4 - 13*diff(f(t), t, 2)*t**2 + 36*f(t)
assert our_hint in classify_ode(eq)
eq = a*y(t) + b*t*diff(y(t), t) + c*t**2*diff(y(t), t, 2)
assert our_hint in classify_ode(eq)
eq = Eq(-3*diff(f(x), x)*x + 2*x**2*diff(f(x), x, x), 0)
sol = C1 + C2*x**Rational(5, 2)
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(3*f(x) - 5*diff(f(x), x)*x + 2*x**2*diff(f(x), x, x), 0)
sol = C1*sqrt(x) + C2*x**3
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(4*f(x) + 5*diff(f(x), x)*x + x**2*diff(f(x), x, x), 0)
sol = (C1 + C2*log(x))/x**2
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
    eq = Eq(6*f(x) - 6*diff(f(x), x)*x + x**2*diff(f(x), x, x) + x**3*diff(f(x), x, x, x), 0)
    sol = C1/x**2 + C2*x + C3*x**3
sols = constant_renumber(sol, 'C', 1, 4)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(-125*f(x) + 61*diff(f(x), x)*x - 12*x**2*diff(f(x), x, x) + x**3*diff(f(x), x, x, x), 0)
sol = x**5*(C1 + C2*log(x) + C3*log(x)**2)
sols = [sol, constant_renumber(sol, 'C', 1, 4)]
sols += [sols[-1].expand()]
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in sols
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = t**2*diff(y(t), t, 2) + t*diff(y(t), t) - 9*y(t)
sol = C1*t**3 + C2*t**-3
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, y(t), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = sin(x)*x**2*f(x).diff(x, 2) + sin(x)*x*f(x).diff(x) + sin(x)*f(x)
sol = C1*sin(log(x)) + C2*cos(log(x))
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
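# Hedged sketch of why Euler equations behave like constant-coefficient
# ones: under x = exp(t) the operator x*d/dx becomes d/dt, since
# dy/dx = (dy/dt)/(dx/dt) and dx/dt = exp(t) = x.
def _example_euler_substitution():
    from sympy import Function, Symbol, exp
    t_ = Symbol('t')
    h_ = Function('h')
    x_of_t = exp(t_)
    x_dydx = x_of_t*(h_(t_).diff(t_)/x_of_t.diff(t_))   # x * dy/dx in t
    assert (x_dydx - h_(t_).diff(t_)) == 0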
def test_nth_order_linear_euler_eq_nonhomogeneous_undetermined_coefficients():
x, t = symbols('x t')
a, b, c, d = symbols('a b c d', integer=True)
our_hint = "nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"
eq = x**4*diff(f(x), x, 4) - 13*x**2*diff(f(x), x, 2) + 36*f(x) + x
assert our_hint in classify_ode(eq, f(x))
eq = a*x**2*diff(f(x), x, 2) + b*x*diff(f(x), x) + c*f(x) + d*log(x)
assert our_hint in classify_ode(eq, f(x))
eq = Eq(x**2*diff(f(x), x, x) + x*diff(f(x), x), 1)
sol = C1 + C2*log(x) + log(x)**2/2
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq, f(x))
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(x**2*diff(f(x), x, x) - 2*x*diff(f(x), x) + 2*f(x), x**3)
sol = x*(C1 + C2*x + Rational(1, 2)*x**2)
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq, f(x))
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(x**2*diff(f(x), x, x) - x*diff(f(x), x) - 3*f(x), log(x)/x)
sol = C1/x + C2*x**3 - Rational(1, 16)*log(x)/x - Rational(1, 8)*log(x)**2/x
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq, f(x))
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(x**2*diff(f(x), x, x) + 3*x*diff(f(x), x) - 8*f(x), log(x)**3 - log(x))
    sol = C1/x**4 + C2*x**2 - Rational(1, 8)*log(x)**3 - Rational(3, 32)*log(x)**2 - Rational(1, 64)*log(x) - Rational(7, 256)
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(x**3*diff(f(x), x, x, x) - 3*x**2*diff(f(x), x, x) + 6*x*diff(f(x), x) - 6*f(x), log(x))
sol = C1*x + C2*x**2 + C3*x**3 - Rational(1, 6)*log(x) - Rational(11, 36)
sols = constant_renumber(sol, 'C', 1, 3)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
def test_nth_order_linear_euler_eq_nonhomogeneous_variation_of_parameters():
x, t = symbols('x, t')
a, b, c, d = symbols('a, b, c, d', integer=True)
our_hint = "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"
    eq = Eq(x**2*diff(f(x), x, 2) - 8*x*diff(f(x), x) + 12*f(x), x**2)
assert our_hint in classify_ode(eq, f(x))
    eq = Eq(a*x**3*diff(f(x), x, 3) + b*x**2*diff(f(x), x, 2) + c*x*diff(f(x), x) + d*f(x), x*log(x))
assert our_hint in classify_ode(eq, f(x))
eq = Eq(x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x), x**4)
sol = C1*x + C2*x**2 + x**4/6
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(3*x**2*diff(f(x), x, x) + 6*x*diff(f(x), x) - 6*f(x), x**3*exp(x))
sol = C1/x**2 + C2*x + x*exp(x)/3 - 4*exp(x)/3 + 8*exp(x)/(3*x) - 8*exp(x)/(3*x**2)
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = Eq(x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x), x**4*exp(x))
sol = C1*x + C2*x**2 + x**2*exp(x) - 2*x*exp(x)
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs.expand() in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x)
sol = C1*x + C2*x**2 + log(x)/2 + S(3)/4
sols = constant_renumber(sol, 'C', 1, 2)
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint).rhs in (sol, sols)
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
eq = -exp(x) + (x*Derivative(f(x), (x, 2)) + Derivative(f(x), x))/x
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert our_hint in classify_ode(eq)
assert dsolve(eq, f(x), hint=our_hint) == sol
assert checkodesol(eq, sol, order=2, solve_for_func=False)[0]
def test_issue_5095():
f = Function('f')
raises(ValueError, lambda: dsolve(f(x).diff(x)**2, f(x), 'separable'))
raises(ValueError, lambda: dsolve(f(x).diff(x)**2, f(x), 'fdsjf'))
def test_almost_linear():
from sympy import Ei
A = Symbol('A', positive=True)
our_hint = 'almost_linear'
f = Function('f')
d = f(x).diff(x)
eq = x**2*f(x)**2*d + f(x)**3 + 1
    sol = dsolve(eq, f(x), hint='almost_linear')
assert sol[0].rhs == (C1*exp(3/x) - 1)**(S(1)/3)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
eq = x*f(x)*d + 2*x*f(x)**2 + 1
    sol = dsolve(eq, f(x), hint='almost_linear')
assert sol[0].rhs == -sqrt(C1 - 2*Ei(4*x))*exp(-2*x)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
eq = x*d + x*f(x) + 1
    sol = dsolve(eq, f(x), hint='almost_linear')
assert sol.rhs == (C1 - Ei(x))*exp(-x)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
assert our_hint in classify_ode(eq, f(x))
eq = x*exp(f(x))*d + exp(f(x)) + 3*x
    sol = dsolve(eq, f(x), hint='almost_linear')
assert sol.rhs == log(C1/x - 3*x/2)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
eq = x + A*(x + diff(f(x), x) + f(x)) + diff(f(x), x) + f(x) + 2
    sol = dsolve(eq, f(x), hint='almost_linear')
assert sol.rhs == (C1 + Piecewise(
(x, Eq(A + 1, 0)), ((-A*x + A - x - 1)*exp(x)/(A + 1), True)))*exp(-x)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
def test_exact_enhancement():
f = Function('f')(x)
df = Derivative(f, x)
eq = f/x**2 + ((f*x - 1)/x)*df
sol = dsolve(eq, f)
assert sol == [Eq(f, (i*sqrt(C1*x**2 + 1) + 1)/x) for i in (-1, 1)]
eq = (x*f - 1) + df*(x**2 - x*f)
rhs = [sol.rhs for sol in dsolve(eq, f)]
assert rhs[0] == x - sqrt(C1 + x**2 - 2*log(x))
assert rhs[1] == x + sqrt(C1 + x**2 - 2*log(x))
eq = (x + 2)*sin(f) + df*x*cos(f)
rhs = [sol.rhs for sol in dsolve(eq, f)]
assert rhs == [
-asin(C1*exp(-x)/x**2) + pi,
asin(C1*exp(-x)/x**2)]
def test_separable_reduced():
f = Function('f')
x = Symbol('x')
df = f(x).diff(x)
    eq = (x/f(x))*df + tan(x**2*f(x)/(x**2*f(x) - 1))
assert classify_ode(eq) == ('separable_reduced', 'lie_group',
'separable_reduced_Integral')
    eq = x*df + f(x)*(1/(x**2*f(x) - 1))
assert classify_ode(eq) == ('separable_reduced', 'lie_group',
'separable_reduced_Integral')
    sol = dsolve(eq, hint='separable_reduced', simplify=False)
assert sol.lhs == log(x**2*f(x))/3 + log(x**2*f(x) - S(3)/2)/6
assert sol.rhs == C1 + log(x)
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
    eq = f(x).diff(x) + f(x)/(x**4*f(x) - x)
assert classify_ode(eq) == ('separable_reduced', 'lie_group',
'separable_reduced_Integral')
    sol = dsolve(eq, hint='separable_reduced')
assert len(sol) == 4
eq = x*df + f(x)*(x**2*f(x))
    sol = dsolve(eq, hint='separable_reduced', simplify=False)
assert sol == Eq(log(x**2*f(x))/2 - log(x**2*f(x) - 2)/2, C1 + log(x))
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
def test_homogeneous_function():
f = Function('f')
eq1 = tan(x + f(x))
eq2 = sin((3*x)/(4*f(x)))
eq3 = cos(3*x/4*f(x))
eq4 = log((3*x + 4*f(x))/(5*f(x) + 7*x))
eq5 = exp((2*x**2)/(3*f(x)**2))
eq6 = log((3*x + 4*f(x))/(5*f(x) + 7*x) + exp((2*x**2)/(3*f(x)**2)))
eq7 = sin((3*x)/(5*f(x) + x**2))
    assert homogeneous_order(eq1, x, f(x)) is None
    assert homogeneous_order(eq2, x, f(x)) == 0
    assert homogeneous_order(eq3, x, f(x)) is None
    assert homogeneous_order(eq4, x, f(x)) == 0
    assert homogeneous_order(eq5, x, f(x)) == 0
    assert homogeneous_order(eq6, x, f(x)) == 0
    assert homogeneous_order(eq7, x, f(x)) is None
def test_linear_coeff_match():
from sympy.solvers.ode import _linear_coeff_match
n, d = z*(2*x + 3*f(x) + 5), z*(7*x + 9*f(x) + 11)
rat = n/d
eq1 = sin(rat) + cos(rat.expand())
eq2 = rat
eq3 = log(sin(rat))
ans = (4, -S(13)/3)
assert _linear_coeff_match(eq1, f(x)) == ans
assert _linear_coeff_match(eq2, f(x)) == ans
assert _linear_coeff_match(eq3, f(x)) == ans
# no c
eq4 = (3*x)/f(x)
# not x and f(x)
eq5 = (3*x + 2)/x
# denom will be zero
eq6 = (3*x + 2*f(x) + 1)/(3*x + 2*f(x) + 5)
# not rational coefficient
eq7 = (3*x + 2*f(x) + sqrt(2))/(3*x + 2*f(x) + 5)
assert _linear_coeff_match(eq4, f(x)) is None
assert _linear_coeff_match(eq5, f(x)) is None
assert _linear_coeff_match(eq6, f(x)) is None
assert _linear_coeff_match(eq7, f(x)) is None
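# Hedged sketch of what the matched pair means: ans = (4, -13/3) above is
# the point where both linear forms 2*x + 3*y + 5 and 7*x + 9*y + 11 vanish,
# so shifting coordinates to it removes the constant terms and makes the
# equation homogeneous.
def _example_linear_coeff_shift():
    from sympy import S
    x0, y0 = 4, -S(13)/3
    assert 2*x0 + 3*y0 + 5 == 0
    assert 7*x0 + 9*y0 + 11 == 0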
def test_linear_coefficients():
f = Function('f')
sol = Eq(f(x), C1/(x**2 + 6*x + 9) - S(3)/2)
eq = f(x).diff(x) + (3 + 2*f(x))/(x + 3)
assert dsolve(eq, hint='linear_coefficients') == sol
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
def test_constantsimp_take_problem():
c = exp(C1) + 2
assert len(Poly(constantsimp(exp(C1) + c + c*x, [C1])).gens) == 2
def test_issue_6879():
f = Function('f')
eq = Eq(Derivative(f(x), x, 2) - 2*Derivative(f(x), x) + f(x), sin(x))
sol = (C1 + C2*x)*exp(x) + cos(x)/2
assert dsolve(eq).rhs == sol
assert checkodesol(eq, sol, order=1, solve_for_func=False)[0]
def test_issue_6989():
f = Function('f')
k = Symbol('k')
assert dsolve(f(x).diff(x) - x*exp(-k*x), f(x)) == Eq(f(x),
C1 + Piecewise(
((-k*x - 1)*exp(-k*x)/k**2, Ne(k**2, 0)),
(x**2/2, True)
))
eq = -f(x).diff(x) + x*exp(-k*x)
sol = dsolve(eq, f(x))
actual_sol = Eq(f(x), C1 + Piecewise(
((-k*x - 1)*exp(-k*x)/k**2, Ne(k**2, 0)),
        (x**2/2, True)
))
errstr = str(eq) + ' : ' + str(sol) + ' == ' + str(actual_sol)
assert sol == actual_sol, errstr
def test_heuristic1():
    y, a, b, c, a4, a3, a2, a1, a0 = symbols("y a b c a4 a3 a2 a1 a0")
f = Function('f')
xi = Function('xi')
eta = Function('eta')
df = f(x).diff(x)
eq = Eq(df, x**2*f(x))
eq1 = f(x).diff(x) + a*f(x) - c*exp(b*x)
eq2 = f(x).diff(x) + 2*x*f(x) - x*exp(-x**2)
eq3 = (1 + 2*x)*df + 2 - 4*exp(-f(x))
eq4 = f(x).diff(x) - (a4*x**4 + a3*x**3 + a2*x**2 + a1*x + a0)**(S(-1)/2)
eq5 = x**2*df - f(x) + x**2*exp(x - (1/x))
eqlist = [eq, eq1, eq2, eq3, eq4, eq5]
i = infinitesimals(eq, hint='abaco1_simple')
assert i == [{eta(x, f(x)): exp(x**3/3), xi(x, f(x)): 0},
{eta(x, f(x)): f(x), xi(x, f(x)): 0},
{eta(x, f(x)): 0, xi(x, f(x)): x**(-2)}]
i1 = infinitesimals(eq1, hint='abaco1_simple')
assert i1 == [{eta(x, f(x)): exp(-a*x), xi(x, f(x)): 0}]
i2 = infinitesimals(eq2, hint='abaco1_simple')
assert i2 == [{eta(x, f(x)): exp(-x**2), xi(x, f(x)): 0}]
i3 = infinitesimals(eq3, hint='abaco1_simple')
assert i3 == [{eta(x, f(x)): 0, xi(x, f(x)): 2*x + 1},
{eta(x, f(x)): 0, xi(x, f(x)): 1/(exp(f(x)) - 2)}]
i4 = infinitesimals(eq4, hint='abaco1_simple')
assert i4 == [{eta(x, f(x)): 1, xi(x, f(x)): 0},
{eta(x, f(x)): 0,
xi(x, f(x)): sqrt(a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4)}]
i5 = infinitesimals(eq5, hint='abaco1_simple')
assert i5 == [{xi(x, f(x)): 0, eta(x, f(x)): exp(-1/x)}]
ilist = [i, i1, i2, i3, i4, i5]
    for eq, i in zip(eqlist, ilist):
check = checkinfsol(eq, i)
assert check[0]
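# Hedged aside on the first result above: for y' = w(x, y) with xi = 0 the
# symmetry condition reduces to eta_x + eta_y*w - w_y*eta = 0; with
# w = x**2*y and eta a function of x alone this is eta' - x**2*eta = 0,
# which eta = exp(x**3/3) satisfies.
def _example_infinitesimal_condition():
    from sympy import Symbol, exp
    x_ = Symbol('x')
    eta_ = exp(x_**3/3)
    assert eta_.diff(x_) - x_**2*eta_ == 0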
def test_issue_6247():
eq = x**2*f(x)**2 + x*Derivative(f(x), x)
sol = dsolve(eq, hint = 'separable_reduced')
assert checkodesol(eq, sol, order=1)[0]
eq = f(x).diff(x, x) + 4*f(x)
sol = dsolve(eq, f(x), simplify=False)
assert sol == Eq(f(x), C1*sin(2*x) + C2*cos(2*x))
def test_heuristic2():
y = Symbol('y')
xi = Function('xi')
eta = Function('eta')
df = f(x).diff(x)
# This ODE can be solved by the Lie Group method, when there are
# better assumptions
eq = df - (f(x)/x)*(x*log(x**2/f(x)) + 2)
i = infinitesimals(eq, hint='abaco1_product')
assert i == [{eta(x, f(x)): f(x)*exp(-x), xi(x, f(x)): 0}]
assert checkinfsol(eq, i)[0]
@slow
def test_heuristic3():
y = Symbol('y')
xi = Function('xi')
eta = Function('eta')
a, b = symbols("a b")
df = f(x).diff(x)
eq = x**2*df + x*f(x) + f(x)**2 + x**2
i = infinitesimals(eq, hint='bivariate')
assert i == [{eta(x, f(x)): f(x), xi(x, f(x)): x}]
assert checkinfsol(eq, i)[0]
eq = x**2*(-f(x)**2 + df)- a*x**2*f(x) + 2 - a*x
i = infinitesimals(eq, hint='bivariate')
assert checkinfsol(eq, i)[0]
def test_heuristic_4():
y, a = symbols("y a")
xi = Function('xi')
eta = Function('eta')
eq = x*(f(x).diff(x)) + 1 - f(x)**2
i = infinitesimals(eq, hint='chi')
assert checkinfsol(eq, i)[0]
def test_heuristic_function_sum():
xi = Function('xi')
eta = Function('eta')
eq = f(x).diff(x) - (3*(1 + x**2/f(x)**2)*atan(f(x)/x) + (1 - 2*f(x))/x +
(1 - 3*f(x))*(x/f(x)**2))
i = infinitesimals(eq, hint='function_sum')
assert i == [{eta(x, f(x)): f(x)**(-2) + x**(-2), xi(x, f(x)): 0}]
assert checkinfsol(eq, i)[0]
def test_heuristic_abaco2_similar():
xi = Function('xi')
eta = Function('eta')
F = Function('F')
a, b = symbols("a b")
eq = f(x).diff(x) - F(a*x + b*f(x))
i = infinitesimals(eq, hint='abaco2_similar')
assert i == [{eta(x, f(x)): -a/b, xi(x, f(x)): 1}]
assert checkinfsol(eq, i)[0]
eq = f(x).diff(x) - (f(x)**2 / (sin(f(x) - x) - x**2 + 2*x*f(x)))
i = infinitesimals(eq, hint='abaco2_similar')
assert i == [{eta(x, f(x)): f(x)**2, xi(x, f(x)): f(x)**2}]
assert checkinfsol(eq, i)[0]
def test_heuristic_abaco2_unique_unknown():
xi = Function('xi')
eta = Function('eta')
F = Function('F')
a, b = symbols("a b")
x = Symbol("x", positive=True)
eq = f(x).diff(x) - x**(a - 1)*(f(x)**(1 - b))*F(x**a/a + f(x)**b/b)
i = infinitesimals(eq, hint='abaco2_unique_unknown')
assert i == [{eta(x, f(x)): -f(x)*f(x)**(-b), xi(x, f(x)): x*x**(-a)}]
assert checkinfsol(eq, i)[0]
eq = f(x).diff(x) + tan(F(x**2 + f(x)**2) + atan(x/f(x)))
i = infinitesimals(eq, hint='abaco2_unique_unknown')
assert i == [{eta(x, f(x)): x, xi(x, f(x)): -f(x)}]
assert checkinfsol(eq, i)[0]
eq = (x*f(x).diff(x) + f(x) + 2*x)**2 -4*x*f(x) -4*x**2 -4*a
i = infinitesimals(eq, hint='abaco2_unique_unknown')
assert checkinfsol(eq, i)[0]
def test_heuristic_linear():
xi = Function('xi')
eta = Function('eta')
F = Function('F')
a, b, m, n = symbols("a b m n")
eq = x**(n*(m + 1) - m)*(f(x).diff(x)) - a*f(x)**n -b*x**(n*(m + 1))
i = infinitesimals(eq, hint='linear')
assert checkinfsol(eq, i)[0]
@XFAIL
def test_kamke():
a, b, alpha, c = symbols("a b alpha c")
eq = x**2*(a*f(x)**2+(f(x).diff(x))) + b*x**alpha + c
i = infinitesimals(eq, hint='sum_function')
assert checkinfsol(eq, i)[0]
def test_series():
C1 = Symbol("C1")
eq = f(x).diff(x) - f(x)
assert dsolve(eq, hint='1st_power_series') == Eq(f(x),
C1 + C1*x + C1*x**2/2 + C1*x**3/6 + C1*x**4/24 +
C1*x**5/120 + O(x**6))
eq = f(x).diff(x) - x*f(x)
assert dsolve(eq, hint='1st_power_series') == Eq(f(x),
C1*x**4/8 + C1*x**2/2 + C1 + O(x**6))
eq = f(x).diff(x) - sin(x*f(x))
    sol = Eq(f(x), (x - 2)**2*(1 + sin(4))*cos(4) + (x - 2)*sin(4) + 2 + O(x**3))
assert dsolve(eq, hint='1st_power_series', ics={f(2): 2}, n=3) == sol
@slow
def test_lie_group():
C1 = Symbol("C1")
x = Symbol("x") # assuming x is real generates an error!
a, b, c = symbols("a b c")
eq = f(x).diff(x)**2
sol = dsolve(eq, f(x), hint='lie_group')
assert checkodesol(eq, sol)[0]
eq = Eq(f(x).diff(x), x**2*f(x))
sol = dsolve(eq, f(x), hint='lie_group')
assert sol == Eq(f(x), C1*exp(x**3)**(1/3))
assert checkodesol(eq, sol)[0]
eq = f(x).diff(x) + a*f(x) - c*exp(b*x)
sol = dsolve(eq, f(x), hint='lie_group')
assert checkodesol(eq, sol)[0]
eq = f(x).diff(x) + 2*x*f(x) - x*exp(-x**2)
sol = dsolve(eq, f(x), hint='lie_group')
actual_sol = Eq(f(x), (C1 + x**2/2)*exp(-x**2))
errstr = str(eq)+' : '+str(sol)+' == '+str(actual_sol)
assert sol == actual_sol, errstr
assert checkodesol(eq, sol)[0]
eq = (1 + 2*x)*(f(x).diff(x)) + 2 - 4*exp(-f(x))
sol = dsolve(eq, f(x), hint='lie_group')
assert sol == Eq(f(x), log(C1/(2*x + 1) + 2))
assert checkodesol(eq, sol)[0]
eq = x**2*(f(x).diff(x)) - f(x) + x**2*exp(x - (1/x))
sol = dsolve(eq, f(x), hint='lie_group')
assert checkodesol(eq, sol)[0]
eq = x**2*f(x)**2 + x*Derivative(f(x), x)
sol = dsolve(eq, f(x), hint='lie_group')
assert sol == Eq(f(x), 2/(C1 + x**2))
assert checkodesol(eq, sol)[0]
@XFAIL
def test_lie_group_issue15219():
eqn = exp(f(x).diff(x)-f(x))
assert 'lie_group' not in classify_ode(eqn, f(x))
def test_user_infinitesimals():
C2 = Symbol("C2")
x = Symbol("x") # assuming x is real generates an error
eq = x*(f(x).diff(x)) + 1 - f(x)**2
sol = dsolve(eq, hint='lie_group', xi=sqrt(f(x) - 1)/sqrt(f(x) + 1),
eta=0)
actual_sol = Eq(f(x), (C1 + x**2)/(C1 - x**2))
errstr = str(eq)+' : '+str(sol)+' == '+str(actual_sol)
assert sol == actual_sol, errstr
raises(ValueError, lambda: dsolve(eq, hint='lie_group', xi=0, eta=f(x)))
def test_issue_7081():
eq = x*(f(x).diff(x)) + 1 - f(x)**2
assert dsolve(eq) == Eq(f(x), -1/(-C1 + x**2)*(C1 + x**2))
def test_2nd_power_series_ordinary():
C1, C2 = symbols("C1 C2")
eq = f(x).diff(x, 2) - x*f(x)
assert classify_ode(eq) == ('2nd_power_series_ordinary',)
assert dsolve(eq) == Eq(f(x),
C2*(x**3/6 + 1) + C1*x*(x**3/12 + 1) + O(x**6))
assert dsolve(eq, x0=-2) == Eq(f(x),
C2*((x + 2)**4/6 + (x + 2)**3/6 - (x + 2)**2 + 1)
+ C1*(x + (x + 2)**4/12 - (x + 2)**3/3 + S(2))
+ O(x**6))
assert dsolve(eq, n=2) == Eq(f(x), C2*x + C1 + O(x**2))
eq = (1 + x**2)*(f(x).diff(x, 2)) + 2*x*(f(x).diff(x)) -2*f(x)
assert classify_ode(eq) == ('2nd_power_series_ordinary',)
assert dsolve(eq) == Eq(f(x), C2*(-x**4/3 + x**2 + 1) + C1*x
+ O(x**6))
eq = f(x).diff(x, 2) + x*(f(x).diff(x)) + f(x)
assert classify_ode(eq) == ('2nd_power_series_ordinary',)
assert dsolve(eq) == Eq(f(x), C2*(
x**4/8 - x**2/2 + 1) + C1*x*(-x**2/3 + 1) + O(x**6))
eq = f(x).diff(x, 2) + f(x).diff(x) - x*f(x)
assert classify_ode(eq) == ('2nd_power_series_ordinary',)
assert dsolve(eq) == Eq(f(x), C2*(
-x**4/24 + x**3/6 + 1) + C1*x*(x**3/24 + x**2/6 - x/2
+ 1) + O(x**6))
eq = f(x).diff(x, 2) + x*f(x)
assert classify_ode(eq) == ('2nd_power_series_ordinary',)
assert dsolve(eq, n=7) == Eq(f(x), C2*(
x**6/180 - x**3/6 + 1) + C1*x*(-x**3/12 + 1) + O(x**7))
def test_2nd_power_series_regular():
C1, C2 = symbols("C1 C2")
eq = x**2*(f(x).diff(x, 2)) - 3*x*(f(x).diff(x)) + (4*x + 4)*f(x)
assert dsolve(eq) == Eq(f(x), C1*x**2*(-16*x**3/9 +
4*x**2 - 4*x + 1) + O(x**6))
eq = 4*x**2*(f(x).diff(x, 2)) -8*x**2*(f(x).diff(x)) + (4*x**2 +
1)*f(x)
assert dsolve(eq) == Eq(f(x), C1*sqrt(x)*(
x**4/24 + x**3/6 + x**2/2 + x + 1) + O(x**6))
eq = x**2*(f(x).diff(x, 2)) - x**2*(f(x).diff(x)) + (
x**2 - 2)*f(x)
assert dsolve(eq) == Eq(f(x), C1*(-x**6/720 - 3*x**5/80 - x**4/8 +
x**2/2 + x/2 + 1)/x + C2*x**2*(-x**3/60 + x**2/20 + x/2 + 1)
+ O(x**6))
eq = x**2*(f(x).diff(x, 2)) + x*(f(x).diff(x)) + (x**2 - S(1)/4)*f(x)
assert dsolve(eq) == Eq(f(x), C1*(x**4/24 - x**2/2 + 1)/sqrt(x) +
C2*sqrt(x)*(x**4/120 - x**2/6 + 1) + O(x**6))
eq = x*(f(x).diff(x, 2)) - f(x).diff(x) + 4*x**3*f(x)
assert dsolve(eq) == Eq(f(x), C2*(-x**4/2 + 1) + C1*x**2 + O(x**6))
def test_issue_7093():
x = Symbol("x") # assuming x is real leads to an error
sol = [Eq(f(x), C1 - 2*x*sqrt(x**3)/5),
Eq(f(x), C1 + 2*x*sqrt(x**3)/5)]
eq = Derivative(f(x), x)**2 - x**3
assert (set(dsolve(eq)) == set(sol) and
checkodesol(eq, sol) == [(True, 0)] * 2)
def test_dsolve_linsystem_symbol():
eps = Symbol('epsilon', positive=True)
eq1 = (Eq(diff(f(x), x), -eps*g(x)), Eq(diff(g(x), x), eps*f(x)))
sol1 = [Eq(f(x), -C1*eps*cos(eps*x) - C2*eps*sin(eps*x)),
Eq(g(x), -C1*eps*sin(eps*x) + C2*eps*cos(eps*x))]
assert checksysodesol(eq1, sol1) == (True, [0, 0])
def test_C1_function_9239():
t = Symbol('t')
C1 = Function('C1')
C2 = Function('C2')
C3 = Symbol('C3')
C4 = Symbol('C4')
eq = (Eq(diff(C1(t), t), 9*C2(t)), Eq(diff(C2(t), t), 12*C1(t)))
sol = [Eq(C1(t), 9*C3*exp(6*sqrt(3)*t) + 9*C4*exp(-6*sqrt(3)*t)),
Eq(C2(t), 6*sqrt(3)*C3*exp(6*sqrt(3)*t) - 6*sqrt(3)*C4*exp(-6*sqrt(3)*t))]
assert checksysodesol(eq, sol) == (True, [0, 0])
def test_issue_15056():
t = Symbol('t')
C3 = Symbol('C3')
assert get_numbered_constants(Symbol('C1') * Function('C2')(t)) == C3
def test_issue_10379():
t,y = symbols('t,y')
sol = dsolve(f(t).diff(t)-(1-51.05*y*f(t)), rational=False)
ans = Eq(f(t), (0.019588638589618*exp(y*(C1 - 51.05*t)) + 0.019588638589618)/y)
assert str(sol) == str(ans)
def test_issue_10867():
x = Symbol('x')
v = Eq(g(x).diff(x).diff(x), (x-2)**2 + (x-3)**3)
ans = Eq(g(x), C1 + C2*x + x**5/20 - 2*x**4/3 + 23*x**3/6 - 23*x**2/2)
assert dsolve(v, g(x)) == ans
def test_issue_11290():
eq = cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x)
sol_1 = dsolve(eq, f(x), simplify=False, hint='1st_exact_Integral')
sol_0 = dsolve(eq, f(x), simplify=False, hint='1st_exact')
assert sol_1.dummy_eq(Eq(Subs(
Integral(u**2 - x*sin(u) - Integral(-sin(u), x), u) +
Integral(cos(u), x), u, f(x)), C1))
assert sol_1.doit() == sol_0
def test_issue_14395():
sol = Eq(f(x), (C1 - x/3 + sin(2*x)/3)*sin(3*x) + (C2 + log(cos(x))
- 2*log(cos(x)**2)/3 + 2*cos(x)**2/3)*cos(3*x))
assert dsolve(Derivative(f(x), x, x) + 9*f(x) - sec(x), f(x)) == sol
def test_sysode_linear_neq_order1():
from sympy.abc import t
Z0 = Function('Z0')
Z1 = Function('Z1')
Z2 = Function('Z2')
Z3 = Function('Z3')
k01, k10, k20, k21, k23, k30 = symbols('k01 k10 k20 k21 k23 k30')
eq = (Eq(Derivative(Z0(t), t), -k01*Z0(t) + k10*Z1(t) + k20*Z2(t) + k30*Z3(t)), Eq(Derivative(Z1(t), t),
k01*Z0(t) - k10*Z1(t) + k21*Z2(t)), Eq(Derivative(Z2(t), t), -(k20 + k21 + k23)*Z2(t)), Eq(Derivative(Z3(t),
t), k23*Z2(t) - k30*Z3(t)))
sols_eq = [Eq(Z0(t), C1*k10/k01 + C2*(-k10 + k30)*exp(-k30*t)/(k01 + k10 - k30) - C3*exp(t*(-
k01 - k10)) + C4*(k10*k20 + k10*k21 - k10*k30 - k20**2 - k20*k21 - k20*k23 + k20*k30 +
k23*k30)*exp(t*(-k20 - k21 - k23))/(k23*(k01 + k10 - k20 - k21 - k23))),
Eq(Z1(t), C1 - C2*k01*exp(-k30*t)/(k01 + k10 - k30) + C3*exp(t*(-k01 - k10)) + C4*(k01*k20 + k01*k21
- k01*k30 - k20*k21 - k21**2 - k21*k23 + k21*k30)*exp(t*(-k20 - k21 - k23))/(k23*(k01 + k10 - k20 -
k21 - k23))),
Eq(Z2(t), C4*(-k20 - k21 - k23 + k30)*exp(t*(-k20 - k21 - k23))/k23),
Eq(Z3(t), C2*exp(-k30*t) + C4*exp(t*(-k20 - k21 - k23)))]
assert dsolve(eq, simplify=False) == sols_eq
def test_nth_algebraic():
eqn = Eq(Derivative(f(x), x), Derivative(g(x), x))
sol = Eq(f(x), C1 + g(x))
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = (diff(f(x)) - x)*(diff(f(x)) + x)
sol = [Eq(f(x), C1 - x**2/2), Eq(f(x), C1 + x**2/2)]
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = (1 - sin(f(x))) * f(x).diff(x)
sol = Eq(f(x), C1)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
M, m, r, t = symbols('M m r t')
phi = Function('phi')
eqn = Eq(-M * phi(t).diff(t),
Rational(3, 2) * m * r**2 * phi(t).diff(t) * phi(t).diff(t,t))
solns = [Eq(phi(t), C1), Eq(phi(t), C1 + C2*t - M*t**2/(3*m*r**2))]
assert checkodesol(eqn, solns[0], order=2, solve_for_func=False)[0]
assert checkodesol(eqn, solns[1], order=2, solve_for_func=False)[0]
assert set(solns) == set(dsolve(eqn, phi(t), hint='nth_algebraic'))
assert set(solns) == set(dsolve(eqn, phi(t)))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x)
sol = Eq(f(x), C1 + C2*x)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1)
sol = Eq(f(x), C1 + C2*x)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1) * (f(x).diff(x) - x)
solns = [Eq(f(x), C1 + x**2/2), Eq(f(x), C1 + C2*x)]
assert checkodesol(eqn, solns[0], order=2, solve_for_func=False)[0]
assert checkodesol(eqn, solns[1], order=2, solve_for_func=False)[0]
assert set(solns) == set(dsolve(eqn, f(x), hint='nth_algebraic'))
assert set(solns) == set(dsolve(eqn, f(x)))
def test_nth_algebraic_redundant_solutions():
# This one has a redundant solution that should be removed
eqn = f(x)*f(x).diff(x)
soln = Eq(f(x), C1)
assert checkodesol(eqn, soln, order=1, solve_for_func=False)[0]
assert soln == dsolve(eqn, f(x), hint='nth_algebraic')
assert soln == dsolve(eqn, f(x))
# This has two integral solutions and no algebraic solutions
eqn = (diff(f(x)) - x)*(diff(f(x)) + x)
sol = [Eq(f(x), C1 - x**2/2), Eq(f(x), C1 + x**2/2)]
assert all(c[0] for c in checkodesol(eqn, sol, order=1, solve_for_func=False))
assert set(sol) == set(dsolve(eqn, f(x), hint='nth_algebraic'))
assert set(sol) == set(dsolve(eqn, f(x)))
# This one doesn't work with dsolve at the time of writing but the
# redundancy checking code should not remove the algebraic solution.
from sympy.solvers.ode import _nth_algebraic_remove_redundant_solutions
eqn = f(x) + f(x)*f(x).diff(x)
solns = [Eq(f(x), 0),
Eq(f(x), C1 - x)]
solns_final = _nth_algebraic_remove_redundant_solutions(eqn, solns, 1, x)
assert all(c[0] for c in checkodesol(eqn, solns, order=1, solve_for_func=False))
assert set(solns) == set(solns_final)
solns = [Eq(f(x), exp(x)),
Eq(f(x), C1*exp(C2*x))]
solns_final = _nth_algebraic_remove_redundant_solutions(eqn, solns, 2, x)
assert solns_final == [Eq(f(x), C1*exp(C2*x))]
#
# These tests can be combined with the above test if they get fixed
# so that dsolve actually works in all these cases.
#
# Fails due to division by f(x) eliminating the solution before nth_algebraic
# is called.
@XFAIL
def test_nth_algebraic_find_multiple1():
eqn = f(x) + f(x)*f(x).diff(x)
solns = [Eq(f(x), 0),
Eq(f(x), C1 - x)]
assert all(c[0] for c in checkodesol(eqn, solns, order=1, solve_for_func=False))
assert set(solns) == set(dsolve(eqn, f(x)))
# prep = True breaks this
def test_nth_algebraic_noprep1():
eqn = Derivative(x*f(x), x, x, x)
sol = Eq(f(x), (C1 + C2*x + C3*x**2) / x)
assert checkodesol(eqn, sol, order=3, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=False, hint='nth_algebraic')
@XFAIL
def test_nth_algebraic_prep1():
eqn = Derivative(x*f(x), x, x, x)
sol = Eq(f(x), (C1 + C2*x + C3*x**2) / x)
assert checkodesol(eqn, sol, order=3, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=True, hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
# prep = True breaks this
def test_nth_algebraic_noprep2():
eqn = Eq(Derivative(x*Derivative(f(x), x), x)/x, exp(x))
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=False, hint='nth_algebraic')
@XFAIL
def test_nth_algebraic_prep2():
eqn = Eq(Derivative(x*Derivative(f(x), x), x)/x, exp(x))
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=True, hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
# This one needs a substitution f' = g. Should be doable...
@XFAIL
def test_2nd_order_substitution():
eqn = -exp(x) + (x*Derivative(f(x), (x, 2)) + Derivative(f(x), x))/x
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x))
# This needs a combination of solutions from nth_algebraic and some other
# method from dsolve
@XFAIL
def test_nth_algebraic_find_multiple2():
eqn = f(x)**2 + f(x)*f(x).diff(x)
solns = [Eq(f(x), 0),
Eq(f(x), C1*exp(-x))]
assert all(c[0] for c in checkodesol(eqn, solns, order=1, solve_for_func=False))
assert set(solns) == dsolve(eqn, f(x))
# There needs to be a way to know how to combine derivatives in the expression
@XFAIL
def test_factoring_ode():
eqn = Derivative(x*f(x), x, x, x) + Derivative(f(x), x, x, x)
soln = Eq(f(x), (C1*x**2/2 + C2*x + C3 - x)/(1 + x))
assert checkodesol(eqn, soln, order=2, solve_for_func=False)[0]
assert soln == dsolve(eqn, f(x))
| 49.506262 | 152 | 0.54266 |
b149712ead3876de8c8b1bdfc88eb583d38fded5
| 1,506 |
py
|
Python
|
ryu/services/protocols/bgp/operator/commands/show/importmap.py
|
MrCocoaCat/ryu
|
9e9571991a73380099b7ba7c6f37e0e587080a6a
|
[
"Apache-2.0"
] | null | null | null |
ryu/services/protocols/bgp/operator/commands/show/importmap.py
|
MrCocoaCat/ryu
|
9e9571991a73380099b7ba7c6f37e0e587080a6a
|
[
"Apache-2.0"
] | null | null | null |
ryu/services/protocols/bgp/operator/commands/show/importmap.py
|
MrCocoaCat/ryu
|
9e9571991a73380099b7ba7c6f37e0e587080a6a
|
[
"Apache-2.0"
] | null | null | null |
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.operator.commands.responses import \
WrongParamResp
from ryu.services.protocols.bgp.operator.views.bgp import CoreServiceDetailView
class Importmap(Command):
help_msg = 'show importmaps'
param_help_msg = 'all | <name>'
command = 'importmap'
def __init__(self, *args, **kwargs):
super(Importmap, self).__init__(*args, **kwargs)
def action(self, params):
if len(params) != 1:
return WrongParamResp()
core_service = self.api.get_core_service()
core_service_view = CoreServiceDetailView(core_service)
importmap_manager = core_service_view.rel('importmap_manager')
importmaps_view = importmap_manager.rel('importmaps')
importmap_name = params[0]
if importmap_name == 'all':
encoded = importmaps_view.encode()
else:
encoded = importmaps_view.encode().get(importmap_name)
if encoded is None:
return CommandsResponse(
STATUS_ERROR,
'Wrong importmap name.'
)
return CommandsResponse(
STATUS_OK,
encoded
)
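# Illustrative CLI behaviour (a sketch inferred from help_msg/param_help_msg
# above; the exact shell wiring is an assumption, not part of this module):
#
#   importmap all       -> encoded view of every registered importmap
#   importmap <name>    -> encoded view of one importmap, or a STATUS_ERROR
#                          response ('Wrong importmap name.') if it is unknown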
| 35.023256 | 80 | 0.648074 |
7682b528524de796c467abd571c8846292aa5ddf
| 3,814 |
py
|
Python
|
Software de Trading/freqtrade/misc.py
|
NatanNMB15/tcc-pytradebot
|
52b19251a030ab9c1a1b95157b4d57a9cf6df9dc
|
[
"MIT"
] | 1 |
2020-05-13T14:12:42.000Z
|
2020-05-13T14:12:42.000Z
|
Software de Trading/freqtrade/misc.py
|
NatanNMB15/tcc-pytradebot
|
52b19251a030ab9c1a1b95157b4d57a9cf6df9dc
|
[
"MIT"
] | 7 |
2020-02-12T02:58:40.000Z
|
2021-06-04T23:24:08.000Z
|
Software de Trading/freqtrade/misc.py
|
NatanNMB15/tcc-pytradebot
|
52b19251a030ab9c1a1b95157b4d57a9cf6df9dc
|
[
"MIT"
] | null | null | null |
"""
Various tool function for Freqtrade and scripts
"""
import gzip
import logging
import re
from datetime import datetime
from pathlib import Path
from typing import IO
import numpy as np
import rapidjson
logger = logging.getLogger(__name__)
def shorten_date(_date: str) -> str:
"""
Trim the date so it fits on small screens
"""
new_date = re.sub('seconds?', 'sec', _date)
new_date = re.sub('minutes?', 'min', new_date)
new_date = re.sub('hours?', 'h', new_date)
new_date = re.sub('days?', 'd', new_date)
new_date = re.sub('^an?', '1', new_date)
return new_date
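# Illustrative doctest-style examples (added for clarity; not part of the
# original module):
#
#   >>> shorten_date('4 minutes ago')
#   '4 min ago'
#   >>> shorten_date('an hour ago')   # 'hours?' shrinks first, then '^an?'
#   '1 h ago'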
############################################
# Used by scripts #
# Matplotlib doesn't support ::datetime64, #
# so we need to convert it into ::datetime #
############################################
def datesarray_to_datetimearray(dates: np.ndarray) -> np.ndarray:
"""
    Convert a pandas array of timestamps into
    a numpy array of datetimes
    :return: numpy array of datetime
"""
return dates.dt.to_pydatetime()
def file_dump_json(filename: Path, data, is_zip=False) -> None:
"""
Dump JSON data into a file
:param filename: file to create
:param data: JSON Data to save
:return:
"""
logger.info(f'dumping json to "{filename}"')
if is_zip:
if filename.suffix != '.gz':
filename = filename.with_suffix('.gz')
with gzip.open(filename, 'w') as fp:
rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
else:
with open(filename, 'w') as fp:
rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
logger.debug(f'done json to "{filename}"')
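# Usage sketch (hypothetical path; not part of the original module):
#
#   from pathlib import Path
#   file_dump_json(Path('/tmp/pairs.json'), {'pair': 'BTC/USDT'})
#   # with is_zip=True the '.json' suffix is swapped for '.gz', so the
#   # data lands in /tmp/pairs.gz instead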
def json_load(datafile: IO):
"""
load data with rapidjson
Use this to have a consistent experience,
    sets number_mode to "NM_NATIVE" for greatest speed
"""
return rapidjson.load(datafile, number_mode=rapidjson.NM_NATIVE)
def file_load_json(file):
if file.suffix != ".gz":
gzipfile = file.with_suffix(file.suffix + '.gz')
else:
gzipfile = file
# Try gzip file first, otherwise regular json file.
if gzipfile.is_file():
logger.debug('Loading ticker data from file %s', gzipfile)
with gzip.open(gzipfile) as tickerdata:
pairdata = json_load(tickerdata)
elif file.is_file():
logger.debug('Loading ticker data from file %s', file)
with open(file) as tickerdata:
pairdata = json_load(tickerdata)
else:
return None
return pairdata
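# Usage sketch (hypothetical path; not part of the original module):
#
#   pairdata = file_load_json(Path('/tmp/pairs.json'))
#   # tries /tmp/pairs.json.gz first, falls back to the plain JSON file,
#   # and returns None when neither exists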
def format_ms_time(date: int) -> str:
"""
    Convert a millisecond epoch timestamp into a readable format.
    :param date: epoch timestamp in ms
"""
return datetime.fromtimestamp(date/1000.0).strftime('%Y-%m-%dT%H:%M:%S')
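# Example (note that datetime.fromtimestamp renders in the *local* time zone):
#
#   format_ms_time(0)   # '1970-01-01T00:00:00' on a UTC machine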
def deep_merge_dicts(source, destination):
"""
    Values from source override destination; destination is returned (and modified in place!)
    Sample:
    >>> a = { 'first' : { 'rows' : { 'pass' : 'dog', 'number' : '1' } } }
    >>> b = { 'first' : { 'rows' : { 'fail' : 'cat', 'number' : '5' } } }
    >>> deep_merge_dicts(b, a) == { 'first' : { 'rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }
True
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
deep_merge_dicts(value, node)
else:
destination[key] = value
return destination
def round_dict(d, n):
"""
Rounds float values in the dict to n digits after the decimal point.
"""
return {k: (round(v, n) if isinstance(v, float) else v) for k, v in d.items()}
def plural(num, singular: str, plural: str = None) -> str:
return singular if (num == 1 or num == -1) else plural or singular + 's'
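# Illustrative examples for the two helpers above (added for clarity; not
# part of the original module):
#
#   round_dict({'profit': 1.23456, 'pair': 'BTC/USDT'}, 3)
#   # -> {'profit': 1.235, 'pair': 'BTC/USDT'}
#   plural(1, 'trade')   # -> 'trade'
#   plural(2, 'trade')   # -> 'trades'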
| 29.338462 | 100 | 0.601468 |
ff270b67fa25eabcc003187c6e8d4c34d6e9944b
| 1,385 |
py
|
Python
|
src/losses/aic20_loss.py
|
hthieu166/selab-aic20-track-2
|
5a87a075e64711388e06fc22171ee314cca1ae10
|
[
"MIT"
] | null | null | null |
src/losses/aic20_loss.py
|
hthieu166/selab-aic20-track-2
|
5a87a075e64711388e06fc22171ee314cca1ae10
|
[
"MIT"
] | null | null | null |
src/losses/aic20_loss.py
|
hthieu166/selab-aic20-track-2
|
5a87a075e64711388e06fc22171ee314cca1ae10
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import os
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'src')))
import src.utils.logging as logging
logger = logging.get_logger(__name__)
import torch
import numpy as np
import torch.nn as nn
from src.utils.reid_metrics import pdist_torch
import src.config as cfg
from src.losses.triplet_loss import TripletLoss
from src.losses.cross_entro_lbl_smooth import CrossEntropyLabelSmooth
import ipdb
class AIC20Loss(nn.Module):
'''
    Combined loss for AIC20 track 2: triplet loss on the embeddings plus
    label-smoothed cross entropy on the classification logits
'''
def __init__(self, margin, num_classes = 230, epsilon=0.1):
super(AIC20Loss, self).__init__()
self.triplet_loss = TripletLoss(margin)
self.celsmth_loss = CrossEntropyLabelSmooth(
num_classes,
epsilon=epsilon,
use_gpu = True)
self.weight = [1.0, 1.0]
def forward(self, inputs, labels):
trip_lss = self.triplet_loss(inputs, labels)
entro_lss= self.celsmth_loss(inputs['cls'], labels)
# logger.info("Triplet loss : %.4f" % trip_lss)
# logger.info("Cross entropy loss : %.4f" % entro_lss)
final_lss = self.weight[0] * trip_lss + self.weight[1] * entro_lss
return final_lss
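# Minimal usage sketch (assumptions: `embeddings`, `logits` and `labels` are
# hypothetical tensors; the keys TripletLoss reads from `inputs` are defined
# in src.losses.triplet_loss -- only the 'cls' key is consumed directly here):
#
#   criterion = AIC20Loss(margin=0.3, num_classes=230)
#   loss = criterion({'emb': embeddings, 'cls': logits}, labels)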
| 32.209302 | 74 | 0.695307 |
efaf4579a9371e18d8835ab66c1fe28f480a33c1
| 8,962 |
py
|
Python
|
tools/ci_build/github/android/build_aar_package.py
|
vpisarev/onnxruntime
|
bab9b80f1f2330d3a115e0abbb4d8278c2be3f44
|
[
"MIT"
] | null | null | null |
tools/ci_build/github/android/build_aar_package.py
|
vpisarev/onnxruntime
|
bab9b80f1f2330d3a115e0abbb4d8278c2be3f44
|
[
"MIT"
] | null | null | null |
tools/ci_build/github/android/build_aar_package.py
|
vpisarev/onnxruntime
|
bab9b80f1f2330d3a115e0abbb4d8278c2be3f44
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import json
import os
import pathlib
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", ".."))
BUILD_PY = os.path.join(REPO_DIR, "tools", "ci_build", "build.py")
JAVA_ROOT = os.path.join(REPO_DIR, "java")
DEFAULT_BUILD_VARIANT = "Full"
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
from util import is_windows # noqa: E402
# By default we will build all 4 ABIs
DEFAULT_BUILD_ABIS = ["armeabi-v7a", "arm64-v8a", "x86", "x86_64"]
# Onnx Runtime native library is built against NDK API 21 by default
# It is possible to build from source for Android API levels below 21, but it is not guaranteed
DEFAULT_ANDROID_MIN_SDK_VER = 21
# Android API 24 is the default target API version for Android builds, based on Microsoft 1CS requirements
# It is possible to build from source using API level 21 and higher as the target SDK version
DEFAULT_ANDROID_TARGET_SDK_VER = 24
def _parse_build_settings(args):
setting_file = args.build_settings_file.resolve()
if not setting_file.is_file():
raise FileNotFoundError('Build config file {} is not a file.'.format(setting_file))
with open(setting_file) as f:
build_settings_data = json.load(f)
build_settings = {}
if 'build_abis' in build_settings_data:
build_settings['build_abis'] = build_settings_data['build_abis']
else:
build_settings['build_abis'] = DEFAULT_BUILD_ABIS
build_params = []
if 'build_params' in build_settings_data:
build_params += build_settings_data['build_params']
else:
raise ValueError('build_params is required in the build config file')
if 'android_min_sdk_version' in build_settings_data:
build_settings['android_min_sdk_version'] = build_settings_data['android_min_sdk_version']
else:
build_settings['android_min_sdk_version'] = DEFAULT_ANDROID_MIN_SDK_VER
build_params += ['--android_api=' + str(build_settings['android_min_sdk_version'])]
if 'android_target_sdk_version' in build_settings_data:
build_settings['android_target_sdk_version'] = build_settings_data['android_target_sdk_version']
else:
build_settings['android_target_sdk_version'] = DEFAULT_ANDROID_TARGET_SDK_VER
if build_settings['android_min_sdk_version'] > build_settings['android_target_sdk_version']:
raise ValueError(
'android_min_sdk_version {} cannot be larger than android_target_sdk_version {}'.format(
build_settings['android_min_sdk_version'], build_settings['android_target_sdk_version']
))
build_settings['build_params'] = build_params
build_settings["build_variant"] = build_settings_data.get("build_variant", DEFAULT_BUILD_VARIANT)
return build_settings
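# Illustrative build settings JSON (a sketch showing only the keys parsed
# above; the values are assumptions, not the shipped defaults -- see
# tools/ci_build/github/android/default_mobile_aar_build_settings.json):
#
#   {
#     "build_abis": ["arm64-v8a", "x86_64"],
#     "android_min_sdk_version": 21,
#     "android_target_sdk_version": 24,
#     "build_variant": "Full",
#     "build_params": ["--parallel", "--build_shared_lib"]
#   }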
def _build_aar(args):
build_settings = _parse_build_settings(args)
build_dir = os.path.abspath(args.build_dir)
ops_config_path = os.path.abspath(args.include_ops_by_config) if args.include_ops_by_config else None
# Setup temp environment for building
temp_env = os.environ.copy()
temp_env['ANDROID_HOME'] = os.path.abspath(args.android_sdk_path)
temp_env['ANDROID_NDK_HOME'] = os.path.abspath(args.android_ndk_path)
# Temp dirs to hold building results
intermediates_dir = os.path.join(build_dir, 'intermediates')
build_config = args.config
aar_dir = os.path.join(intermediates_dir, 'aar', build_config)
jnilibs_dir = os.path.join(intermediates_dir, 'jnilibs', build_config)
exe_dir = os.path.join(intermediates_dir, 'executables', build_config)
base_build_command = [sys.executable, BUILD_PY] + build_settings['build_params'] + ['--config=' + build_config]
header_files_path = ''
# Build binary for each ABI, one by one
for abi in build_settings['build_abis']:
abi_build_dir = os.path.join(intermediates_dir, abi)
abi_build_command = base_build_command + [
'--android_abi=' + abi,
'--build_dir=' + abi_build_dir
]
if ops_config_path is not None:
abi_build_command += ['--include_ops_by_config=' + ops_config_path]
subprocess.run(abi_build_command, env=temp_env, shell=False, check=True, cwd=REPO_DIR)
# create symbolic links for libonnxruntime.so and libonnxruntime4j_jni.so
# to jnilibs/[abi] for later compiling the aar package
abi_jnilibs_dir = os.path.join(jnilibs_dir, abi)
os.makedirs(abi_jnilibs_dir, exist_ok=True)
for lib_name in ['libonnxruntime.so', 'libonnxruntime4j_jni.so']:
target_lib_name = os.path.join(abi_jnilibs_dir, lib_name)
# If the symbolic already exists, delete it first
# For some reason, os.path.exists will return false for a symbolic link in Linux,
# add double check with os.path.islink
if os.path.exists(target_lib_name) or os.path.islink(target_lib_name):
os.remove(target_lib_name)
os.symlink(os.path.join(abi_build_dir, build_config, lib_name), target_lib_name)
# copy executables for each abi, in case we want to publish those as well
abi_exe_dir = os.path.join(exe_dir, abi)
for exe_name in ['libonnxruntime.so', 'onnxruntime_perf_test', 'onnx_test_runner']:
os.makedirs(abi_exe_dir, exist_ok=True)
target_exe_name = os.path.join(abi_exe_dir, exe_name)
shutil.copyfile(os.path.join(abi_build_dir, build_config, exe_name), target_exe_name)
# we only need to define the header files path once
if not header_files_path:
header_files_path = os.path.join(abi_build_dir, build_config, 'android', 'headers')
# The directory to publish final AAR
aar_publish_dir = os.path.join(build_dir, 'aar_out', build_config)
os.makedirs(aar_publish_dir, exist_ok=True)
# get the common gradle command args
gradle_command = [
'gradle',
'--no-daemon',
'-b=build-android.gradle',
'-c=settings-android.gradle',
'-DjniLibsDir=' + jnilibs_dir,
'-DbuildDir=' + aar_dir,
'-DheadersDir=' + header_files_path,
'-DpublishDir=' + aar_publish_dir,
'-DminSdkVer=' + str(build_settings['android_min_sdk_version']),
'-DtargetSdkVer=' + str(build_settings['android_target_sdk_version']),
'-DbuildVariant=' + str(build_settings['build_variant'])
]
    # Without shell=True on Windows, gradle will not be found on the path
    use_shell = is_windows()
# clean, build, and publish to a local directory
subprocess.run(gradle_command + ['clean'], env=temp_env, shell=use_shell, check=True, cwd=JAVA_ROOT)
subprocess.run(gradle_command + ['build'], env=temp_env, shell=use_shell, check=True, cwd=JAVA_ROOT)
subprocess.run(gradle_command + ['publish'], env=temp_env, shell=use_shell, check=True, cwd=JAVA_ROOT)
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__),
description='''Create Android Archive (AAR) package for one or more Android ABI(s)
and building properties specified in the given build config file, see
tools/ci_build/github/android/default_mobile_aar_build_settings.json for details.
The output of the final AAR package can be found under [build_dir]/aar_out
'''
)
parser.add_argument("--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""),
help="Path to the Android SDK")
parser.add_argument("--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""),
help="Path to the Android NDK")
parser.add_argument('--build_dir', type=str, default=os.path.join(REPO_DIR, 'build/android_aar'),
help='Provide the root directory for build output')
parser.add_argument(
"--include_ops_by_config", type=str,
help="Include ops from config file. See /docs/Reduced_Operator_Kernel_build.md for more information.")
parser.add_argument("--config", type=str, default="Release",
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration to build.")
parser.add_argument('build_settings_file', type=pathlib.Path,
help='Provide the file contains settings for building AAR')
return parser.parse_args()
def main():
args = parse_args()
# Android SDK and NDK path are required
if not args.android_sdk_path:
raise ValueError('android_sdk_path is required')
if not args.android_ndk_path:
raise ValueError('android_ndk_path is required')
_build_aar(args)
if __name__ == '__main__':
main()
| 42.67619 | 115 | 0.699286 |
5402a0b2b567449ce509dddf4b1a68f39757d0df
| 1,650 |
py
|
Python
|
examples/signing.py
|
Barre/python-dkim
|
55403408038a5347ac3469adc9bbdc7de36c6076
|
[
"BSD-2-Clause"
] | 1 |
2020-12-19T21:21:58.000Z
|
2020-12-19T21:21:58.000Z
|
examples/signing.py
|
Barre/python-dkim
|
55403408038a5347ac3469adc9bbdc7de36c6076
|
[
"BSD-2-Clause"
] | 1 |
2020-12-19T21:23:55.000Z
|
2020-12-19T21:23:55.000Z
|
examples/signing.py
|
Barre/python-dkim
|
55403408038a5347ac3469adc9bbdc7de36c6076
|
[
"BSD-2-Clause"
] | 1 |
2021-12-31T10:56:00.000Z
|
2021-12-31T10:56:00.000Z
|
import dkim
from email.parser import Parser
def main():
message = """From: Example <[email protected]>
This is a message body. Fun!
"""
selector = "_dkim"
signing_domain = "example.com"
secret_key = "-----BEGIN RSA PRIVATE KEY-----\n" \
"MIICXQIBAAKBgQC4GUGr+d/6SFNzVLYpphnRd0QPGKz2uWnV65RAxa1Pw352Bqiz\n" \
"qiKOBjgYGzj8pJQSs8tOvv/2k6jpI809RnESqOFgF0gu3UJbNnu3+cd8k/kiQj+q\n" \
"4cKKRpAT92ccxc7svhCNgN1sBGmROYZuysG3Vu3Dyc079gSLtnSrgXb+gQIDAQAB\n" \
"AoGAemlI0opm1Kvs2T4VliH8/tvX5FXbBH8LEZQAUwVeFTB/UQlieXyCV39pIxZO\n" \
"0Sa50qm8YNL9rb5HTSZiHQFOwyAKNqS4m/7JCsbuH4gQkPgPF561BHNL9oKfYgJq\n" \
"9P4kEFfDTBoXKBMxwWtT7AKV8dYvCa3vYzPQ/1BnqQdw2zECQQDyscdgR9Ih59PQ\n" \
"b72ddibdsxS65uXS2vzYLe7SKl+4R5JgJzw0M6DTAnoYFf6JAsKGZM15PCC0E16t\n" \
"RRo47U9VAkEAwjEVrlQ0/8yPACbDggDJg/Zz/uRu1wK0zjqj4vKjleubaX4SEvj7\n" \
"r6xxZm9hC1pMJAC9y3bbkbgCRBjXfyY6fQJBANe5aq2MaZ41wTOPf45NjbKXEiAo\n" \
"SbUpboKCIbyyaa8V/2h0t7D3C0dE9l4efsguqdZoF7Rh2/f1F70QpYRgfJkCQQCH\n" \
"oRrAeGXP50JVW72fNgeJGH/pnghgOa6of0JpxwhENJuGMZxUDfxTtUA6yD3iXP3j\n" \
"A3WL/wbaHsfOYf9Y+g1NAkAGLhx67Ah+uBNK4Xvfz0YPGINX20m+CMsxAw7FOaNv\n" \
"IW2oWFfZCB4APkIis79Ql45AHpavwx5XodBMzZwJUvlL\n" \
"-----END RSA PRIVATE KEY-----\n"
message = Parser().parsestr(text=message)
dkim.Signer(message, selector, signing_domain, secret_key.encode()).add_signature_to_message()
print(message.as_string())
if __name__ == '__main__':
main()
| 48.529412 | 98 | 0.690909 |
766180d609eb46cd46add723016ceb50c9f039bf
| 5,447 |
py
|
Python
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_operation_status_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728 |
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_operation_status_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773 |
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_operation_status_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916 |
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationStatusOperations:
"""OperationStatusOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storagesync.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
location_name: str,
workflow_id: str,
operation_id: str,
**kwargs
) -> "_models.OperationStatus":
"""Get Operation status.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param location_name: The desired region to obtain information from.
:type location_name: str
:param workflow_id: workflow Id.
:type workflow_id: str
:param operation_id: operation Id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatus, or the result of cls(response)
:rtype: ~azure.mgmt.storagesync.models.OperationStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
'workflowId': self._serialize.url("workflow_id", workflow_id, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/locations/{locationName}/workflows/{workflowId}/operations/{operationId}'} # type: ignore
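# Usage sketch (assumptions: `client` is a storagesync management client
# created elsewhere in the SDK, and every argument value is a placeholder):
#
#   status = await client.operation_status.get(
#       resource_group_name="my-rg", location_name="eastus",
#       workflow_id="<workflow-id>", operation_id="<operation-id>")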
| 48.633929 | 217 | 0.683128 |
b182e16b73619b2f22441046a8f57e51ba28faed
| 2,887 |
py
|
Python
|
feasibility.py
|
BYU-PRISM/Grid-Energy-Management
|
a10c6f9a9bbd9a8a8a44dc0b42fb7bd923d33cd0
|
[
"MIT"
] | 1 |
2021-12-16T17:53:48.000Z
|
2021-12-16T17:53:48.000Z
|
feasibility.py
|
BYU-PRISM/Grid-Energy-Management
|
a10c6f9a9bbd9a8a8a44dc0b42fb7bd923d33cd0
|
[
"MIT"
] | null | null | null |
feasibility.py
|
BYU-PRISM/Grid-Energy-Management
|
a10c6f9a9bbd9a8a8a44dc0b42fb7bd923d33cd0
|
[
"MIT"
] | null | null | null |
import numpy as np
def load_feasibility(gen_time, tol=1e-8):
gen = gen_time[0]
time = gen_time[1]
r = np.zeros(len(time))
r[0] = 0
dt = time[1] - time[0]
for i in range(1, len(time)):
r[i] = (gen[i] - gen[i-1])/dt
# r = gen_time[2]
# r = np.diff(gen)/np.diff(time)
# r = np.gradient(gen, time)
feasible = True
err = np.zeros(len(r))
for i in range(len(r)):
err[i] = r[i]**2 - 1
# eliminate error from within constraints
err = np.clip(err, 0, None)
mess1 = sum(err)
if sum(err) > tol:
feasible = False
    # feasibility requires every ramp rate to satisfy |r| <= 1 within tol
return feasible, mess1
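# Illustrative check (a sketch, not part of the original module): a generator
# ramping at slope 2 breaks the implicit |d(gen)/dt| <= 1 limit, so the
# summed excess is positive and the schedule is reported infeasible.
#
#   t = np.linspace(0.0, 1.0, 11)
#   feasible, residual = load_feasibility([2.0 * t, t])
#   # feasible -> False, residual -> 30.0 (10 steps x (2**2 - 1))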
def co_feasibility(consCheck, tol=1e-7):
#consCheck is [time, gen1, gen2]
# r = np.diff(consCheck[1])/np.diff(consCheck[0])
time = consCheck[0]
g1 = consCheck[1]
g2 = consCheck[2]
# r = consCheck[3]
r = np.zeros(len(time))
r[0] = 0
dt = time[1] - time[0]
for i in range(1, len(time)):
r[i] = (g1[i] - g1[i-1])/dt
feasible = True
err = np.zeros(len(r))
for i in range(len(r)):
err[i] = r[i]**2 - 1
err = np.clip(err, 0, None)
mess1 = sum(err)
err2 = np.zeros(len(g1))
for i in range(len(g1)):
err2[i] += (g2[i] - (2*g1[i]))**2
    # err2 penalises violations of the coupling constraint g2 = 2*g1
residual = sum(err) + sum(err2)
mess2 = residual
if sum(err) > tol:
feasible = False
return feasible, mess1, mess2
def tri_feasibility(consCheck, tol=1e-7):
#consCheck is [time, gen1, gen2, gen3, tdemand1, tdemand2, load1, load2]
# r = np.diff(consCheck[1])/np.diff(consCheck[0])
# r3 = np.diff(consCheck[3])/np.diff(consCheck[0])
time = consCheck[0]
g1 = consCheck[1]
g2 = consCheck[2]
g3 = consCheck[3]
r = np.zeros(len(time))
r[0] = 0
dt = time[1] - time[0]
for i in range(1, len(time)):
r[i] = (g1[i] - g1[i-1])/dt
r3 = np.zeros(len(time))
r3[0] = 0
for i in range(1, len(time)):
r3[i] = (g3[i] - g3[i-1])/dt
dtot1 = consCheck[4]
dtot2 = consCheck[5]
d1 = consCheck[6]
d2 = consCheck[7]
# r = consCheck[8]
# r3 = consCheck[9]
feasible = True
err = np.zeros(len(r))
err2 = np.zeros(len(r))
err3 = np.zeros(len(g1))
for i in range(len(r)):
err[i] = r[i]**2 - 1
err = np.clip(err, 0, None)
for i in range(len(r)):
err2[i] = r3[i]**2 - 1
err2 = np.clip(err2, 0, None)
mess1 = sum(err) + sum(err2)
for i in range(len(g1)):
err3[i] += (g2[i] - (2*g1[i]))**2
err3[i] += (dtot1[i] - d1[i] - 2*g3[i])**2
err3[i] += (dtot2[i] - d2[i] - 3*g3[i])**2
residual = sum(err) + sum(err2) + sum(err3)
mess2 = residual
if sum(err) > tol:
feasible = False
return feasible, mess1, mess2
| 27.759615 | 76 | 0.520956 |
d12dda65d86b33c031adc62f235474b2ea90d5e0
| 689 |
py
|
Python
|
mii_sorter/migrations/0010_auto_fix_folder_path.py
|
MiiRaGe/miilibrary
|
f613c6654f21db62668a6a9d68e6678fdd2a1d03
|
[
"MIT"
] | null | null | null |
mii_sorter/migrations/0010_auto_fix_folder_path.py
|
MiiRaGe/miilibrary
|
f613c6654f21db62668a6a9d68e6678fdd2a1d03
|
[
"MIT"
] | 1 |
2018-01-26T15:52:51.000Z
|
2018-01-26T15:52:51.000Z
|
mii_sorter/migrations/0010_auto_fix_folder_path.py
|
MiiRaGe/miilibrary
|
f613c6654f21db62668a6a9d68e6678fdd2a1d03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
from mii_sorter.models import Movie, Episode
def change_folder_path_to_abs(apps, schema_editor):
for movie in Movie.objects.exclude(folder_path__icontains=settings.DESTINATION_PLACEHOLDER):
folder_path = movie.folder_path.replace(u'{destination_dir}', settings.DESTINATION_PLACEHOLDER)
movie.folder_path = folder_path
movie.save()
class Migration(migrations.Migration):
dependencies = [
('mii_sorter', '0009_regexrenaming'),
]
operations = [
migrations.RunPython(change_folder_path_to_abs)
]
| 27.56 | 103 | 0.741655 |
5450281b1fc890e668b2e68b591c82e76aab4fd8
| 3,469 |
py
|
Python
|
hummingbot/connector/exchange/binance/binance_constants.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 2 |
2022-03-03T10:00:27.000Z
|
2022-03-08T13:57:56.000Z
|
hummingbot/connector/exchange/binance/binance_constants.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 6 |
2022-01-31T15:44:54.000Z
|
2022-03-06T04:27:12.000Z
|
hummingbot/connector/exchange/binance/binance_constants.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1 |
2022-02-22T11:03:02.000Z
|
2022-02-22T11:03:02.000Z
|
from hummingbot.core.api_throttler.data_types import LinkedLimitWeightPair, RateLimit
from hummingbot.core.data_type.in_flight_order import OrderState
HBOT_ORDER_ID_PREFIX = "x-XEKWYICX"
# Base URL
REST_URL = "https://api.binance.{}/api/"
WSS_URL = "wss://stream.binance.{}:9443/ws"
PUBLIC_API_VERSION = "v3"
PRIVATE_API_VERSION = "v3"
# Public API endpoints or BinanceClient function
TICKER_PRICE_CHANGE_PATH_URL = "/ticker/24hr"
EXCHANGE_INFO_PATH_URL = "/exchangeInfo"
PING_PATH_URL = "/ping"
SNAPSHOT_PATH_URL = "/depth"
SERVER_TIME_PATH_URL = "/time"
# Private API endpoints or BinanceClient function
ACCOUNTS_PATH_URL = "/account"
MY_TRADES_PATH_URL = "/myTrades"
ORDER_PATH_URL = "/order"
BINANCE_USER_STREAM_PATH_URL = "/userDataStream"
WS_HEARTBEAT_TIME_INTERVAL = 30
# Binance params
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
TIME_IN_FORCE_GTC = 'GTC' # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC' # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK' # Fill or kill
# Rate Limit Type
REQUEST_WEIGHT = "REQUEST_WEIGHT"
ORDERS = "ORDERS"
ORDERS_24HR = "ORDERS_24HR"
# Rate Limit time intervals
ONE_MINUTE = 60
ONE_SECOND = 1
ONE_DAY = 86400
MAX_REQUEST = 5000
# Order States
ORDER_STATE = {
"PENDING": OrderState.PENDING_CREATE,
"NEW": OrderState.OPEN,
"FILLED": OrderState.FILLED,
"PARTIALLY_FILLED": OrderState.PARTIALLY_FILLED,
"PENDING_CANCEL": OrderState.OPEN,
"CANCELED": OrderState.CANCELLED,
"REJECTED": OrderState.FAILED,
"EXPIRED": OrderState.FAILED,
}
# Websocket event types
DIFF_EVENT_TYPE = "depthUpdate"
TRADE_EVENT_TYPE = "trade"
RATE_LIMITS = [
# Pools
RateLimit(limit_id=REQUEST_WEIGHT, limit=1200, time_interval=ONE_MINUTE),
RateLimit(limit_id=ORDERS, limit=10, time_interval=ONE_SECOND),
RateLimit(limit_id=ORDERS_24HR, limit=100000, time_interval=ONE_DAY),
# Weighted Limits
RateLimit(limit_id=TICKER_PRICE_CHANGE_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 40)]),
RateLimit(limit_id=EXCHANGE_INFO_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
              linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 10)]),
RateLimit(limit_id=SNAPSHOT_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 50)]),
RateLimit(limit_id=BINANCE_USER_STREAM_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 1)]),
RateLimit(limit_id=SERVER_TIME_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 1)]),
RateLimit(limit_id=PING_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 1)]),
RateLimit(limit_id=ACCOUNTS_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 10)]),
RateLimit(limit_id=MY_TRADES_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 10)]),
RateLimit(limit_id=ORDER_PATH_URL, limit=MAX_REQUEST, time_interval=ONE_MINUTE,
linked_limits=[LinkedLimitWeightPair(REQUEST_WEIGHT, 1),
LinkedLimitWeightPair(ORDERS, 1),
LinkedLimitWeightPair(ORDERS_24HR, 1)]),
]
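# Descriptive note (added for clarity): each endpoint entry above is
# effectively unbounded on its own (MAX_REQUEST per minute); the real
# ceilings come from the linked pools. A single ORDER_PATH_URL call, for
# example, draws 1 unit from REQUEST_WEIGHT (1200/min), 1 from ORDERS
# (10/s) and 1 from ORDERS_24HR (100000/day).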
| 37.706522 | 97 | 0.751513 |
a98aa5c04913f10b7503188867931f72f5d000a9
| 2,890 |
py
|
Python
|
core/urls.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | null | null | null |
core/urls.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | null | null | null |
core/urls.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | null | null | null |
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# pylint: disable=W0611
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from django.urls import include
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
import django_cas_ng.views
from core.sitemaps import SITEMAPS
from . import signals
urlpatterns = [
path("admin/", admin.site.urls),
path("bailleurs/", include(("bailleurs.urls", "bailleurs"), namespace="bailleurs")),
path(
"conventions/",
include(("conventions.urls", "conventions"), namespace="conventions"),
),
path(
"instructeurs/",
include(("instructeurs.urls", "instructeurs"), namespace="instructeurs"),
),
path(
"programmes/",
include(("programmes.urls", "programmes"), namespace="programmes"),
),
path("stats/", include(("stats.urls", "stats"), namespace="stats")),
path("", include(("users.urls", "users"), namespace="users")),
path("upload/", include(("upload.urls", "upload"), namespace="upload")),
path("comments/", include(("comments.urls", "comments"), namespace="comments")),
path("cgu", TemplateView.as_view(template_name="editorial/cgu.html"), name="cgu"),
path(
"accessibilite",
TemplateView.as_view(template_name="editorial/accessibilite.html"),
name="accessibilite",
),
path(
"sitemap.xml",
sitemap,
{"sitemaps": SITEMAPS},
name="django.contrib.sitemaps.views.sitemap",
),
path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
path("api/v1/", include(("api.v1.urls", "api"), namespace="apis")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.CERBERE_AUTH:
urlpatterns = urlpatterns + [
path(
"accounts/cerbere-login",
django_cas_ng.views.LoginView.as_view(),
name="cas_ng_login",
),
path(
"accounts/cerbere-logout",
django_cas_ng.views.LogoutView.as_view(),
name="cas_ng_logout",
),
]
else:
urlpatterns = urlpatterns + [
path("accounts/", include("django.contrib.auth.urls")),
]
| 34.404762 | 88 | 0.652941 |
0003f116b9c117432d025f2e4df4d079ba8a9a06
| 1,141 |
py
|
Python
|
model-optimizer/mo/utils/ir_reader/extenders/strided_slice_extender.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | 1 |
2021-07-30T17:03:50.000Z
|
2021-07-30T17:03:50.000Z
|
model-optimizer/mo/utils/ir_reader/extenders/strided_slice_extender.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/utils/ir_reader/extenders/strided_slice_extender.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.utils.ir_reader.extender import Extender
from mo.utils.graph import Node
from mo.front.common.partial_infer.utils import int64_array
class StridedSlice_extender(Extender):
op = 'StridedSlice'
@staticmethod
def extend(op: Node):
attrs = ['shrink_axis_mask', 'new_axis_mask', 'ellipsis_mask', 'begin_mask', 'end_mask']
for attr in attrs:
Extender.attr_to_list(op, attr)
op.begin_mask = int64_array([1 - i for i in op.begin_mask])
op.end_mask = int64_array([1 - i for i in op.end_mask])
| 32.6 | 96 | 0.729185 |
cdf4ad5a77e0021447ec71f737535fa0c21822f5
| 3,746 |
py
|
Python
|
textflow/view/dashboard/__init__.py
|
ysenarath/textflow
|
ebb86cbedaf6ba7ed62a9f811a7d7d1818d938ac
|
[
"MIT"
] | 4 |
2020-12-10T19:38:15.000Z
|
2021-08-02T02:00:46.000Z
|
textflow/view/dashboard/__init__.py
|
ysenarath/textflow
|
ebb86cbedaf6ba7ed62a9f811a7d7d1818d938ac
|
[
"MIT"
] | 2 |
2021-01-08T18:35:04.000Z
|
2021-02-07T04:25:56.000Z
|
textflow/view/dashboard/__init__.py
|
ysenarath/textflow
|
ebb86cbedaf6ba7ed62a9f811a7d7d1818d938ac
|
[
"MIT"
] | 1 |
2021-04-04T19:21:40.000Z
|
2021-04-04T19:21:40.000Z
|
# Copyright 2021 by Yasas Senarath.
# All rights reserved.
# This file is part of the TextFlow,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
from collections import namedtuple
import flask
from flask import Blueprint, session, jsonify
from flask_login import current_user
from textflow import services, auth
from textflow.utils import jsend
from textflow.view.base import render_template
from textflow.view.dashboard.documents import UploadForm
from textflow.view.dashboard.labels import LabelsForm, LabelForm
from textflow.view.dashboard.project import ProjectForm
from textflow.view.dashboard.users import UsersForm, AssignmentForm
from textflow.view.dashboard import agreement, status, dataset, project, labels, users, documents, models
__all__ = [
'view'
]
view = Blueprint('dashboard', __name__)
@view.route('/projects/<project_id>/dashboard', methods=['GET'])
@auth.login_required
@auth.roles_required(role=['admin', 'manager'])
def index(project_id):
"""Render dashboard
:param project_id: project id
:return: rendered template for dashboard
"""
Section = namedtuple('Section', ['label', 'value', 'icon'])
sidebar = [
('General', [
Section('Status', 'status', 'fa-tasks'),
Section('Agreement', 'agreement', 'fa-chart-bar'),
Section('Dataset', 'dataset', 'fa-table'),
Section('Models', 'models', 'fa-magic'),
]),
('Editor', [
Section('Project', 'project', 'fa-tools'),
Section('Labels', 'labels', 'fa-tags'),
Section('Users', 'users', 'fa-users'),
Section('Documents', 'documents', 'fa-file-alt'),
])
]
current_section = 'status'
if ('dash.section' in session) and (project_id in session['dash.section']):
current_section = session['dash.section'][project_id]
if current_user.role == 'manager':
kwargs = dict(project_id=project_id, sidebar=sidebar, section=current_section)
return render_template('dashboard/index.html', **kwargs)
else:
obj_project = services.get_project(user_id=current_user.id, project_id=project_id)
obj_assignments = services.list_assignments(project_id=project_id)
obj_labels = services.list_labels(user_id=current_user.id, project_id=project_id)
forms = dict(
update_project=ProjectForm(obj=obj_project),
update_labels=LabelsForm(labels=obj_labels),
create_label=LabelForm(),
update_users=UsersForm(users=obj_assignments),
create_user=AssignmentForm(),
upload_documents=UploadForm(),
)
kwargs = dict(project_id=project_id, sidebar=sidebar, forms=forms, section=current_section)
return render_template('dashboard/index.html', **kwargs)
@view.route('/api/projects/<project_id>/dashboard/sections', methods=['GET'])
@auth.login_required
@auth.roles_required(role=['admin', 'manager'])
def update_section(project_id):
if 'value' not in flask.request.args:
return jsonify(jsend.fail({'message': 'Argument section \'value\' not defined.'}))
section = flask.request.args['value']
if 'dash.section' not in session:
session['dash.section'] = dict()
session_var = {k: v for k, v in session['dash.section'].items()}
session_var[project_id] = section
session['dash.section'] = session_var
return jsonify(jsend.success(section))
agreement.view.register(view)
status.view.register(view)
dataset.view.register(view)
models.view.register(view)
project.view.register(view)
labels.view.register(view)
users.view.register(view)
documents.view.register(view)
| 38.22449 | 105 | 0.693807 |
51b916dc4a6e6471de9dfaa865dccebabdce13be
| 1,639 |
py
|
Python
|
hourglass/middleware.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | null | null | null |
hourglass/middleware.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | 3 |
2021-03-19T23:45:25.000Z
|
2022-03-21T22:21:12.000Z
|
hourglass/middleware.py
|
davezen1/calc
|
410d114f01e84e9fc6363f58853a4d9451a00ef2
|
[
"CC0-1.0"
] | null | null | null |
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from debug_toolbar.middleware import DebugToolbarMiddleware
class ComplianceMiddleware:
'''
Middleware to add security-related response headers.
If the SECURITY_HEADERS_ON_ERROR_ONLY setting is True, then the headers
will only be added to error responses (400 and above). This behavior is
needed for cloud.gov deployments because its proxy adds the headers
to 200 responses, but not to error responses.
Otherwise, the headers will be added to all responses.
'''
def process_response(self, request, response):
if (not settings.SECURITY_HEADERS_ON_ERROR_ONLY or
response.status_code >= 400):
response["X-Content-Type-Options"] = "nosniff"
response["X-XSS-Protection"] = "1; mode=block"
return response
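# A minimal sketch (not part of the original module) of the behavior described
# in the docstring, assuming SECURITY_HEADERS_ON_ERROR_ONLY is False:
#
#   from django.http import HttpResponse
#   response = ComplianceMiddleware().process_response(request, HttpResponse())
#   assert response["X-Content-Type-Options"] == "nosniff"
#   assert response["X-XSS-Protection"] == "1; mode=block"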
def show_toolbar(request):
'''
Like debug_toolbar.middleware.show_toolbar, but without the
INTERNAL_IPS consultation, since the developer may be using
Docker or accessing their development instance over a mobile/tablet
device.
'''
if not settings.DEBUG:
raise AssertionError("I should not be used when DEBUG is False!")
if request.is_ajax():
return False
return True
class DebugOnlyDebugToolbarMiddleware(DebugToolbarMiddleware):
'''
Like DebugToolbarMiddleware, but tells Django it's unused if
DEBUG or HIDE_DEBUG_UI is False.
'''
def __init__(self):
if not settings.DEBUG or settings.HIDE_DEBUG_UI:
raise MiddlewareNotUsed()
super().__init__()
| 29.8 | 75 | 0.708969 |
8c627692e56b3023602d700ce8063ece88bac228
| 293 |
py
|
Python
|
v1/feedback/factories/feedback.py
|
buckyroberts/Website-API
|
e74d202a41533c7622acbe12c793d047d44012ad
|
[
"MIT"
] | 64 |
2020-10-02T02:58:06.000Z
|
2022-01-29T20:00:50.000Z
|
v1/feedback/factories/feedback.py
|
buckyroberts/Website-API
|
e74d202a41533c7622acbe12c793d047d44012ad
|
[
"MIT"
] | 93 |
2020-10-04T22:53:46.000Z
|
2022-03-05T18:17:46.000Z
|
v1/feedback/factories/feedback.py
|
buckyroberts/Website-API
|
e74d202a41533c7622acbe12c793d047d44012ad
|
[
"MIT"
] | 21 |
2020-10-11T14:16:13.000Z
|
2021-11-09T17:50:25.000Z
|
import factory
from factory.django import DjangoModelFactory
from ..models.feedback import Feedback
class FeedbackFactory(DjangoModelFactory):
name = factory.Faker('pystr')
email = factory.Faker('email')
message = factory.Faker('text')
class Meta:
model = Feedback
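# Usage sketch (assumes a configured Django test database):
#   feedback = FeedbackFactory()        # create() -- saves a Feedback row
#   unsaved = FeedbackFactory.build()   # build() -- unsaved model instance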
| 20.928571 | 45 | 0.723549 |
941a05081705291290592fa69ffd66a22a2f2fc1
| 2,216 |
py
|
Python
|
convert_model/test_conversion.py
|
dzubke/speech-lite
|
65f83ac2b7551650820f079ce5152741f2a6fdb8
|
[
"Apache-2.0"
] | null | null | null |
convert_model/test_conversion.py
|
dzubke/speech-lite
|
65f83ac2b7551650820f079ce5152741f2a6fdb8
|
[
"Apache-2.0"
] | null | null | null |
convert_model/test_conversion.py
|
dzubke/speech-lite
|
65f83ac2b7551650820f079ce5152741f2a6fdb8
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import onnx
from onnx import onnx_pb
from onnx_coreml import convert
import coremltools
class TestNet(nn.Module):
def __init__(self, inplace=False):
super(TestNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(1, 32, (5, 32), 1, 0)
self.gru = nn.GRU(4160, 256, num_layers=1, batch_first=True)
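        # With the 1x1x200x161 input used below, conv1 produces 32x196x130
        # feature maps; flattening 32 channels x 130 positions gives
        # 32 * 130 = 4160, which is exactly the GRU input size.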
def forward(self, x):
#x = self.relu(self.conv1(x))
x = self.conv1(x)
x = torch.transpose(x, 1, 2).contiguous()
b, t, f, c = x.data.size()
x = x.view((b, t, f * c))
x, h = self.gru(x)
return x
layer_count = 1
# model = nn.LSTM(10, 20, num_layers=layer_count, bidirectional=True)
#model = nn.Sequential(nn.Conv1d(1, 32, 5, 1, 0), nn.GRU(10, 20, num_layers=layer_count))
#model = nn.Conv2d(1, 32, (5, 32), 1, 0)
model = TestNet()
model.eval()
model_name = "20200130_1conv2d-1gru"
torch.save(model, './torch_models/'+model_name+'.pth')
with torch.no_grad():
#input = torch.randn(5, 3, 10)
#h0 = torch.randn(layer_count, 3, 20)
#output, hn = model(input, h0)
input = torch.rand(1, 1, 200, 161)
# default export
#torch.onnx.export(model, (input, h0), './onnx_models/'+model_name+'.onnx')
torch.onnx.export(model, input, './onnx_models/'+model_name+'.onnx')
onnx_model = onnx.load('./onnx_models/'+model_name+'.onnx')
# input shape [5, 3, 10]
print(onnx_model.graph.input[0])
# export with `dynamic_axes`
#torch.onnx.export(model, input, model_name+'.onnx',
# input_names=['input', 'h0'],
# output_names=['output', 'hn'],
# dynamic_axes={'input': {0: 'sequence'}, 'output': {0: 'sequence'}})
#torch.onnx.export(model, input, model_name+'.onnx',
# input_names=['input'],
# output_names=['output']
# )
onnx_model = onnx.load('./onnx_models/'+model_name+'.onnx')
# input shape ['sequence', 3, 10]
print(onnx_model.graph.input[0])
mlmodel = convert(model=onnx_model, minimum_ios_deployment_target='13')
mlmodel.save('./coreml_models/'+model_name+'.mlmodel')
| 30.356164 | 89 | 0.599278 |
d373cff78cb535cbfe37251e5c801749457b504c
| 7,746 |
py
|
Python
|
network/textnet.py
|
shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild
|
15bc2b97078d3216cfd075ccba1cf2d2e42af54f
|
[
"MIT"
] | 17 |
2022-03-20T05:41:51.000Z
|
2022-03-25T04:53:17.000Z
|
network/textnet.py
|
shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild
|
15bc2b97078d3216cfd075ccba1cf2d2e42af54f
|
[
"MIT"
] | null | null | null |
network/textnet.py
|
shuyansy/A-detection-and-recognition-pipeline-of-complex-meters-in-wild
|
15bc2b97078d3216cfd075ccba1cf2d2e42af54f
|
[
"MIT"
] | 1 |
2022-03-23T03:06:51.000Z
|
2022-03-23T03:06:51.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from network.vgg import VggNet
from network.resnet import ResNet
from util.roi import batch_roi_transform
from network.crnn import CRNN
from util.converter import keys
from util.misc import mkdirs, to_device
import cv2
from util.tool import order_points
class UpBlok(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut):
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = F.relu(x)
x = self.conv3x3(x)
x = F.relu(x)
x = self.deconv(x)
return x
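# UpBlok fuses a decoder feature map with its encoder skip connection:
# concatenate, mix with 1x1 and 3x3 convolutions, then upsample 2x with a
# transposed convolution.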
class FPN(nn.Module):
def __init__(self, backbone='vgg_bn', is_training=True):
super().__init__()
self.is_training = is_training
self.backbone_name = backbone
self.class_channel = 6
self.reg_channel = 2
if backbone == "vgg" or backbone == 'vgg_bn':
if backbone == 'vgg_bn':
self.backbone = VggNet(name="vgg16_bn", pretrain=True)
elif backbone == 'vgg':
self.backbone = VggNet(name="vgg16", pretrain=True)
self.deconv5 = nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1)
self.merge4 = UpBlok(512 + 256, 128)
self.merge3 = UpBlok(256 + 128, 64)
self.merge2 = UpBlok(128 + 64, 32)
self.merge1 = UpBlok(64 + 32, 32)
elif backbone == 'resnet50' or backbone == 'resnet101':
if backbone == 'resnet101':
self.backbone = ResNet(name="resnet101", pretrain=True)
elif backbone == 'resnet50':
self.backbone = ResNet(name="resnet50", pretrain=True)
self.deconv5 = nn.ConvTranspose2d(2048, 256, kernel_size=4, stride=2, padding=1)
self.merge4 = UpBlok(1024 + 256, 256)
self.merge3 = UpBlok(512 + 256, 128)
self.merge2 = UpBlok(256 + 128, 64)
self.merge1 = UpBlok(64 + 64, 32)
else:
print("backbone is not support !")
def forward(self, x):
C1, C2, C3, C4, C5 = self.backbone(x)
up5 = self.deconv5(C5)
up5 = F.relu(up5)
up4 = self.merge4(C4, up5)
up4 = F.relu(up4)
up3 = self.merge3(C3, up4)
up3 = F.relu(up3)
up2 = self.merge2(C2, up3)
up2 = F.relu(up2)
up1 = self.merge1(C1, up2)
return up1, up2, up3, up4, up5
class TextNet(nn.Module):
def __init__(self, backbone='vgg', is_training=True):
super().__init__()
self.is_training = is_training
self.backbone_name = backbone
self.fpn = FPN(self.backbone_name, self.is_training)
# ##class and regression branch
self.out_channel = 3
self.predict = nn.Sequential(
nn.Conv2d(32, self.out_channel, kernel_size=1, stride=1, padding=0)
)
num_class = len(keys) + 1
self.recognizer = Recognizer(num_class)
def load_model(self, model_path):
print('Loading from {}'.format(model_path))
state_dict = torch.load(model_path)
self.load_state_dict(state_dict['model'])
def forward(self, x,boxes,mapping):
up1, up2, up3, up4, up5 = self.fpn(x)
predict_out = self.predict(up1)
rois = batch_roi_transform(x, boxes[:, :8], mapping)
pred_mapping = mapping
pred_boxes = boxes
# print("rois",rois.shape)
preds = self.recognizer(rois)
# print("preds",preds.shape)
preds_size = torch.LongTensor([preds.size(0)] * int(preds.size(1)))
preds_size=to_device(preds_size)
# print("predsize", preds_size)
return predict_out,(preds, preds_size)
def forward_test(self, x):
up1, up2, up3, up4, up5 = self.fpn(x)
output = self.predict(up1)
# print("predict_out",output.shape)
pointer_pred = torch.sigmoid(output[0, 0, :, :]).data.cpu().numpy()
dail_pred = torch.sigmoid(output[0, 1, :, :]).data.cpu().numpy()
text_pred = torch.sigmoid(output[0, 2, :, :]).data.cpu().numpy()
pointer_pred = (pointer_pred > 0.6).astype(np.uint8)
dail_pred = (dail_pred > 0.7).astype(np.uint8)
text_pred = (text_pred > 0.7).astype(np.uint8)
# dail_label=self.filter(dail_pred)
text_label = self.filter(text_pred)
dail_label=dail_pred
# order dail_label by y_coordinates
dail_edges = dail_label * 255
dail_edges = dail_edges.astype(np.uint8)
_, dail_contours,_ = cv2.findContours(dail_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
std_point = []
for i in range(len(dail_contours)):
rect = cv2.minAreaRect(dail_contours[i])
std_point.append((int(rect[0][0]), int(rect[0][1])))
if len(std_point) < 2:
std_point=None
return pointer_pred, dail_label, text_label, (None, None),std_point
else:
if std_point[0][1] >= std_point[1][1]:
pass
else:
std_point[0], std_point[1] = std_point[1], std_point[0]
# cv2.imshow("srtc",text_pred*255)
# cv2.imshow("1", pointer_pred * 255)
# cv2.imshow("2", dail_pred * 255)
# cv2.waitKey(0)
word_edges =text_label* 255
img_bin, contours, hierarchy = cv2.findContours(word_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_dis=10000
index=0
if len(contours) != 0:
for i in range(len(contours)):
min_rect = cv2.minAreaRect(contours[i])
test_point=(min_rect[0][0], min_rect[0][1])
dis=(test_point[0]-std_point[1][0]) **2 + (test_point[1]-std_point[1][1]) **2
if dis<max_dis:
max_dis=dis
index=i
rect_points = cv2.boxPoints(cv2.minAreaRect(contours[index]))
bboxes = np.int0(rect_points)
bboxes=order_points(bboxes)
print("bbox", bboxes)
boxes=bboxes.reshape(1,8)
mapping=[0]
mapping=np.array(mapping)
rois = batch_roi_transform(x,boxes[:, :8], mapping)
# print("rois",rois.shape)
preds = self.recognizer(rois)
preds_size = torch.LongTensor([preds.size(0)] * int(preds.size(1)))
# print("*******", preds.shape, preds_size)
else:
preds=None
preds_size=None
return pointer_pred,dail_label,text_label,(preds,preds_size),std_point
def filter(self,image,n=30):
text_num, text_label = cv2.connectedComponents(image.astype(np.uint8), connectivity=8)
for i in range(1, text_num + 1):
pts = np.where(text_label == i)
if len(pts[0]) < n:
text_label[pts] = 0
text_label = text_label > 0
text_label = np.clip(text_label, 0, 1)
text_label = text_label.astype(np.uint8)
return text_label
class Recognizer(nn.Module):
def __init__(self, nclass):
super().__init__()
self.crnn = CRNN(32, 1, nclass, 256)
def forward(self, rois):
return self.crnn(rois)
if __name__=="__main__":
csrnet=TextNet().to('cuda')
img=torch.ones((1,3,256,256)).to('cuda')
out=csrnet(img)
print(out.shape) # 1*12*256*256
| 32.410042 | 107 | 0.587142 |
1683b2e27c33baa5d6ef3476dc2ca7dbf0986bcb
| 83 |
py
|
Python
|
python/testData/copyPaste/IndentTabIncrease.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2 |
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/copyPaste/IndentTabIncrease.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173 |
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/copyPaste/IndentTabIncrease.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2 |
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
print "Line 2"
class Test:
def __init__(self):
print "Line 2"
print "Line 1"
| 11.857143 | 20 | 0.650602 |
33cf843dde8e8eb984fa54f006e1cb8440807ed3
| 627 |
py
|
Python
|
bin/passing_arguments.py
|
gshklover/async_v20
|
965346eafee09ee4ee510b22a064d3cf661b7aa4
|
[
"MIT"
] | 23 |
2017-10-30T18:49:11.000Z
|
2022-02-08T12:22:18.000Z
|
bin/passing_arguments.py
|
gshklover/async_v20
|
965346eafee09ee4ee510b22a064d3cf661b7aa4
|
[
"MIT"
] | 23 |
2017-10-20T12:32:18.000Z
|
2021-03-13T07:43:23.000Z
|
bin/passing_arguments.py
|
gshklover/async_v20
|
965346eafee09ee4ee510b22a064d3cf661b7aa4
|
[
"MIT"
] | 12 |
2017-10-30T18:49:13.000Z
|
2021-02-06T02:26:37.000Z
|
import asyncio
from async_v20 import OandaClient
client = OandaClient()
# This
coroutine_1 = client.create_order('AUD_USD', 10)
# Is the same as this
from async_v20 import InstrumentName, DecimalNumber
coroutine_2 = client.create_order(
InstrumentName('AUD_USD'), DecimalNumber(10)
)
# Is the same as this
from async_v20 import OrderRequest
coroutine_3 = client.post_order(
order_request=OrderRequest(
instrument='AUD_USD', units=10, type='MARKET'
)
)
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.gather(
coroutine_1,
coroutine_2,
coroutine_3
)
)
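# asyncio.gather schedules the three coroutines concurrently on the event
# loop; each resolves to the response of the order it created.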
| 18.441176 | 53 | 0.722488 |
fa994748f68202886bbb279e7ffaba42f8805022
| 2,207 |
py
|
Python
|
xlib/qt/widgets/QXFixedLayeredImages.py
|
seanwan/DeepFaceLive
|
0a076dbfdffdc5d12b7986d2ec3361eec5812382
|
[
"MIT"
] | null | null | null |
xlib/qt/widgets/QXFixedLayeredImages.py
|
seanwan/DeepFaceLive
|
0a076dbfdffdc5d12b7986d2ec3361eec5812382
|
[
"MIT"
] | null | null | null |
xlib/qt/widgets/QXFixedLayeredImages.py
|
seanwan/DeepFaceLive
|
0a076dbfdffdc5d12b7986d2ec3361eec5812382
|
[
"MIT"
] | null | null | null |
from typing import List, Union
import numpy as np
from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from ..gui.from_np import QImage_from_np
from .QXWidget import QXWidget
class QXFixedLayeredImages(QXWidget):
"""
A widget to show multiple stacked images in fixed area
all images must have the same aspect ratio
"""
def __init__(self, fwidth, height):
super().__init__()
self._fwidth = fwidth
self._height = height
self._qp = QPainter()
self._images : List = []
def clear_images(self):
self._images : List = []
self.update()
def add_image(self, image, name=None):
"""
image QImage
QPixmap
np.ndarray of uint8 dtype
"""
saved_ref = None
if not isinstance(image, QImage) and not isinstance(image, QPixmap):
if isinstance(image, np.ndarray):
saved_ref = image
image = QImage_from_np(image)
else:
raise ValueError(f'Unsupported type of image {image.__class__}')
self._images.append( (image, saved_ref) )
self.update()
def sizeHint(self):
return QSize(self._fwidth, self._height)
def paintEvent(self, event):
super().paintEvent(event)
qp = self._qp
qp.begin(self)
qp.setRenderHint(QPainter.RenderHint.SmoothPixmapTransform)
w = self._fwidth
h = self._height
        w_half = w / 2
        h_half = h / 2
        a = w / h
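        # Aspect-fit each image in the fixed widget rect: wider images are
        # letterboxed (full width, centred vertically), narrower ones are
        # pillarboxed (full height, centred horizontally).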
for image, _ in self._images:
size = image.size()
ap = size.width() / size.height()
if ap > a:
ph_fit = h * (a / ap)
                rect = QRect(0, int(h_half - ph_fit / 2), w, int(ph_fit))
elif ap < a:
pw_fit = w * (ap / a)
                rect = QRect(int(w_half - pw_fit / 2), 0, int(pw_fit), h)
else:
rect = self.rect()
if isinstance(image, QImage):
qp.drawImage(rect, image, image.rect())
elif isinstance(image, QPixmap):
qp.drawPixmap(rect, image, image.rect())
qp.end()
| 26.590361 | 80 | 0.546443 |
47f471b026d40b6c66fc458af134201313f4ab2e
| 2,707 |
py
|
Python
|
Preprocessing/code/options/config.py
|
dongkwonjin/Semantic-Line-MWCS
|
2533bbaa62dde955b560fa2ab6a78a9b1a0038ac
|
[
"MIT"
] | 16 |
2021-04-19T06:43:14.000Z
|
2022-03-28T06:37:59.000Z
|
Preprocessing/code/options/config.py
|
dongkwonjin/Semantic-Line-MWCS
|
2533bbaa62dde955b560fa2ab6a78a9b1a0038ac
|
[
"MIT"
] | 1 |
2021-09-22T07:10:36.000Z
|
2022-03-20T12:02:48.000Z
|
Preprocessing/code/options/config.py
|
dongkwonjin/Semantic-Line-MWCS
|
2533bbaa62dde955b560fa2ab6a78a9b1a0038ac
|
[
"MIT"
] | null | null | null |
import os
import torch
class Config(object):
def __init__(self):
self.settings_for_system()
self.settings_for_path()
self.settings_for_image_processing()
self.settings_for_preprocessing()
self.settings_for_dataloading()
self.settings_for_visualization()
self.settings_for_save()
def settings_for_system(self):
self.gpu_id = "0"
self.seed = 123
# GPU setting
os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_id
torch.backends.cudnn.deterministic = True
def settings_for_path(self):
self.dir = dict()
self.dir['head'] = '--system root' # need to modify
self.dir['proj'] = os.path.dirname(os.getcwd()) + '/'
self.settings_dataset_path()
self.dir['out'] = self.dir['proj'] + 'output_{}_{}/'.format(self.dataset_name, self.datalist_mode)
def settings_dataset_path(self):
self.dataset_name = 'SL5K' # ['SEL', 'SL5K'] # SL5K --> Nankai
self.datalist_mode = 'train' # ['train', 'test', 'val']
        self.dir['dataset'] = dict()
        self.dir['preprocess'] = dict()
        self.dir['dataset']['SEL'] = self.dir['head'] + '--dataset root /SEL/'  # need to modify
        self.dir['dataset']['SEL_Hard'] = self.dir['head'] + '--dataset root /SEL_Hard/'  # need to modify
        self.dir['dataset']['SL5K'] = self.dir['head'] + '--dataset root /SL5K/'  # need to modify
        self.dir['preprocess']['SEL'] = self.dir['head'] + '--preprocessed_data root /SEL/pickle/'  # need to modify
        self.dir['preprocess']['SEL_Hard'] = self.dir['head'] + '--preprocessed_data root /SEL_Hard/pickle/'  # need to modify
        self.dir['preprocess']['SL5K'] = self.dir['head'] + '--preprocessed_data root /SL5K/pickle/'  # need to modify
self.dir['dataset']['SEL_img'] = self.dir['dataset']['SEL'] + 'ICCV2017_JTLEE_images/'
self.dir['dataset']['SEL_Hard_img'] = self.dir['dataset']['SEL_Hard'] + 'images/'
self.dir['dataset']['SL5K_img'] = self.dir['dataset']['SL5K']
def settings_for_image_processing(self):
self.org_height = 400
self.org_width = 400
self.height = 400
self.width = 400
self.size = [self.width, self.height, self.width, self.height]
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.center_pt = [(self.width - 1) / 2, (self.height - 1) / 2]
self.max_dist = self.width // 2 - 20
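        # center_pt is the pixel-coordinate image centre; max_dist caps line
        # offsets at half the image width minus a 20-pixel margin.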
def settings_for_dataloading(self):
self.num_workers = 4
self.batch_size = {'img': 1}
self.data_flip = True
self.crop_size = 0
def settings_for_visualization(self):
self.display = True
self.display_offset_validation = True
def settings_for_save(self):
self.save_pickle = True
def settings_for_preprocessing(self):
self.scale_factor = 4
self.sampling_mode = 'grid' # ['threshold', 'grid']
self.min_angle_error = 0.08
self.min_dist_error = 0.08
self.max_offset = 80
| 36.093333 | 118 | 0.611008 |
16bd3013b868ddb9d061097c7813e4086449f7e1
| 3,357 |
py
|
Python
|
src/machinable/repository.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 23 |
2020-02-28T14:29:04.000Z
|
2021-12-23T20:50:54.000Z
|
src/machinable/repository.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 172 |
2020-02-24T12:12:11.000Z
|
2022-03-29T03:08:24.000Z
|
src/machinable/repository.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 1 |
2020-11-23T22:42:20.000Z
|
2020-11-23T22:42:20.000Z
|
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
from machinable import schema
from machinable.component import compact
from machinable.element import Connectable, Element
from machinable.group import Group
from machinable.project import Project
from machinable.settings import get_settings
from machinable.storage.storage import Storage
from machinable.types import VersionType
if TYPE_CHECKING:
from machinable.execution import Execution
from machinable.experiment import Experiment
class Repository(Connectable, Element):
"""Repository base class"""
_kind = "Repository"
def __init__(
self,
storage: Union[str, None] = None,
version: VersionType = None,
default_group: Optional[str] = get_settings().default_group,
):
super().__init__()
if storage is None:
storage = Storage.default or get_settings().default_storage
self.__model__ = schema.Repository(
storage=compact(storage, version), default_group=default_group
)
self._resolved_storage: Optional[Storage] = None
@classmethod
def filesystem(
cls,
directory: str,
default_group: Optional[str] = get_settings().default_group,
) -> "Repository":
return cls(
storage="machinable.storage.filesystem_storage",
version={"directory": directory},
default_group=default_group,
)
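    # Usage sketch (hypothetical directory path):
    #   repo = Repository.filesystem("./results")
    #   repo.commit(experiments=my_experiment)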
def storage(self, reload: bool = False) -> Storage:
"""Resolves and returns the storage instance"""
if self._resolved_storage is None or reload:
self._resolved_storage = Storage.make(
self.__model__.storage[0], self.__model__.storage[1:]
)
return self._resolved_storage
def commit(
self,
experiments: Union["Experiment", List["Experiment"]],
execution: Optional["Execution"] = None,
) -> None:
from machinable.experiment import Experiment
if isinstance(experiments, Experiment):
experiments = [experiments]
for experiment in experiments:
if not isinstance(experiment, Experiment):
raise ValueError(
f"Expected experiment, found: {type(experiment)} {experiment}"
)
if experiment.is_mounted():
continue
# ensure that configuration has been parsed
assert experiment.config is not None
group = experiment.group
if group is None:
group = Group(self.__model__.default_group)
experiment.__related__["group"] = group
self.storage().create_experiment(
experiment=experiment,
group=group,
project=Project.get(),
)
# write deferred experiment data
for filepath, data in experiment._deferred_data.items():
experiment.save_file(filepath, data)
experiment._deferred_data = {}
if execution is None or execution.is_mounted():
return
self.storage().create_execution(execution, experiments)
def __repr__(self):
return f"Repository <{self.__model__.default_group}>"
def __str__(self):
return self.__repr__()
| 32.278846 | 82 | 0.631218 |
7764ef409976db3a84de8358223ff4c7f3844b60
| 7,408 |
py
|
Python
|
runcode/views.py
|
lgb2002/club
|
099f5777699abe48f067684fc5fd2b7c7e97e07d
|
[
"MIT"
] | 1 |
2019-09-15T01:23:32.000Z
|
2019-09-15T01:23:32.000Z
|
runcode/views.py
|
lgb2002/club
|
099f5777699abe48f067684fc5fd2b7c7e97e07d
|
[
"MIT"
] | 1 |
2022-02-12T05:22:29.000Z
|
2022-02-12T05:22:29.000Z
|
runcode/views.py
|
lgb2002/club
|
099f5777699abe48f067684fc5fd2b7c7e97e07d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
import requests, json, os, urllib
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from urllib.parse import urlencode
from django.utils import timezone
from runcode.models import *
from datetime import datetime
from ipware.ip import get_ip
from django.core.exceptions import MultipleObjectsReturned
def logout(request):
ip = get_ip(request)
login_user = UserLogin.objects.get(user_ip = ip)
login_user.last_id = ""
login_user.last_pwd = ""
    login_user.login_now = False
    login_user.save()
    return render(request, 'runcode/login.html', {})
def profile(request):
return render(request, 'runcode/profile.html', {})
def login_check(request):
ip = get_ip(request)
login_user = UserLogin.objects.get(user_ip = ip)
check = login_user.login_now
if check :
id = login_user.last_id
user = UserInfo.objects.get(user_id = id)
name = user.user_name
data = UserData.objects.get(user_name = name)
visit_lank = data.visit_lank
return render(request, 'runcode/profile.html', {'name' : name, 'visit_lank' : str(visit_lank)})
else :
return render(request, 'runcode/login.html', {'check' : 0})
return render(request, 'runcode/profile.html', {'name' : "error"})
def login_success(ip, name):
login_user = UserLogin.objects.get(user_ip = ip)
login_user.login_now = True
login_user.login_date = datetime.now()
user = UserInfo.objects.get(user_name = name)
login_user.last_id = user.user_id
login_user.last_pwd = user.user_pwd
login_user.save()
def manual(request):
return render(request, 'runcode/manual.html', {})
def services(request):
return render(request, 'runcode/services.html', {})
def home(request):
ip = get_ip(request)
print(ip)
try:
login_user = UserLogin.objects.get(user_ip = ip)
    except UserLogin.MultipleObjectsReturned:
        # Duplicate login rows exist for this IP: remove them and recreate below.
        UserLogin.objects.filter(user_ip = ip).delete()
login = UserLogin(
user_ip = ip,
login_now = False,
login_date = datetime.now()
)
login.save()
except UserLogin.DoesNotExist:
login = UserLogin(
user_ip = ip,
login_now = False,
login_date = datetime.now()
)
login.save()
posts = Learning.objects.all().order_by('id')
return render(request, 'runcode/home.html', {'posts':posts})
@csrf_exempt
def register(request):
if request.method == "POST":
register_id = request.POST.get('userid')
register_pwd = request.POST.get('userpwd')
register_name = request.POST.get('username')
print("userid : "+register_id+" userpwd : "+register_pwd+" username : "+register_name)
try:
register_user = UserInfo.objects.get(user_name = register_name)
try:
register_user = UserInfo.objects.get(user_id = register_id)
print("id exist")
return render(request, 'runcode/register.html', {
'result' : 'id exist'
})
except UserInfo.DoesNotExist:
print("name exist")
return render(request, 'runcode/register.html', {
'result' : 'name exist'
})
except UserInfo.DoesNotExist:
print("success")
register = UserInfo(
user_id = register_id,
user_pwd = register_pwd,
user_name = register_name,
created_date = datetime.now()
)
register.save()
data = UserData(
user_name = register_name,
visit_lank = 0,
study_lank = "0_0_0_0_0_0_0",
coding_lank = "0_0_0_0_0_0_0"
)
data.save()
return render(request, 'runcode/register.html', {
'user_id' : register_id,
'user_pwd' : register_pwd,
'user_name' : register_name,
'result' : 'success'
})
else:
return render(request, 'runcode/register.html',{})
@csrf_exempt
def login(request):
if request.method == "POST":
get_id = request.POST.get('userid')
get_pwd = request.POST.get('userpwd')
try:
imsi_user = UserInfo.objects.get(user_id = get_id, user_pwd = get_pwd)
user_id = imsi_user.user_id
user_pwd = imsi_user.user_pwd
user_name = imsi_user.user_name
data = UserData.objects.get(user_name = user_name)
data.visit_lank += 1
data.save()
login = Login(
login_id = user_id,
login_pwd = user_pwd,
login_date = datetime.now(),
login_error = "No Error"
)
login.save()
ip = get_ip(request)
login_success(ip, user_name)
#login_check(request)
return render(request, 'runcode/login.html', {'user_name': user_name, 'user_id' :user_id, 'user_pwd' : user_pwd, 'error' : 'No Error'})
except UserInfo.DoesNotExist:
            print("Error! No matching user information!")
login = Login(
login_id = get_id,
login_pwd = get_pwd,
login_date = datetime.now(),
login_error = "No Match"
)
login.save()
imsi_login = Login.objects.filter(login_id = get_id, login_pwd = get_pwd).order_by('login_date').last()
error = imsi_login.login_error
return render(request, 'runcode/login.html', {'user_id' : "No id", 'user_pwd' : "No pwd", 'error' : error})
else:
return render(request, 'runcode/login.html', {})
@csrf_exempt
def run(request):
if request.method == "POST":
message = str(request.body, encoding='utf-8')
else :
return render(request, 'runcode/home.html', {
'warnings' : "None",
'errors' : "None",
'result' : "None",
'stats' : "None"
})
print("test message:" + message)
url = "https://rextester.com/rundotnet/run"
headers = {'Host': 'rextester.com', 'Connection': 'keep-alive', 'Accept': 'text/plain, */*; q=0.01' ,'Origin': 'https://rextester.com'
, 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
, 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': 'https://rextester.com/rundotnet'
, 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7'}
cookies = {'Cookie': '__utmz=178476455.1533087180.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmc=178476455; ASP.NET_SessionId=5ispjysxn0kh5iy4uz0afxpo; __utma=178476455.1346840277.1533087180.1538900674.1538902623.22; __utmt=1; .REXTESTER=F293D23BFD81DE732D7F7AC1911F57A5270931E19C1ADC0B140386B7C20D03AE14F4D0B527712DEC150CF216DABAA81F3F5421C68B3D396C88FB90A98D1499AEE474FD27A79E11E3A9A3207B65EAC80BB25D9F2377C169993AE7982129F6D1385A543B684CB300A08AC7626D754270B1D6FEE5472BE67486167C2E5F1D8FE0774FE31939DADA147043EE94501A7FC367; __utmb=178476455.12.10.1538902623'}
res = requests.post(url, headers=headers , cookies=cookies, data=message)
#print(res.status_code)
#print(res.text)
j = json.loads(res.text)
#print("j:" +j)
jsonString = json.dumps(j, indent=4)
#print("jsonString:" +jsonString)
dict = json.loads(jsonString)
warnings = str(dict['Warnings'])
errors = str(dict['Errors'])
result = str(dict['Result'])
stats = str(dict['Stats'])
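    # rextester answers with a JSON object; its Warnings/Errors/Result/Stats
    # fields are unpacked above and passed straight to the template context.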
'''
print("text : "+res.text)
print("headers : "+str(res.headers))
print("raise_for_status : "+str(res.raise_for_status))
print("url : "+res.url)
'''
if errors == "None" and warnings == "None":
return render(request, 'runcode/home.html', {
'warnings' : warnings,
'errors' : errors,
'result' : result,
'stats' : stats ,
'alert' : 'success'
})
return render(request, 'runcode/home.html', {
'warnings' : warnings,
'errors' : errors,
'result' : result,
'stats' : stats
})
| 29.991903 | 575 | 0.697354 |
4e61c11a2dce123e2be5dda9199bfafde96f507a
| 546 |
py
|
Python
|
demos/live/live_demo.py
|
pearsonlab/improv
|
9dfa92a0a0e768505ee90586004d84a8e360c6a8
|
[
"MIT"
] | 11 |
2019-09-24T18:11:20.000Z
|
2021-08-16T09:58:01.000Z
|
demos/live/live_demo.py
|
pearsonlab/improv
|
9dfa92a0a0e768505ee90586004d84a8e360c6a8
|
[
"MIT"
] | 4 |
2019-12-05T20:48:53.000Z
|
2021-03-16T19:50:52.000Z
|
demos/live/live_demo.py
|
pearsonlab/improv
|
9dfa92a0a0e768505ee90586004d84a8e360c6a8
|
[
"MIT"
] | 2 |
2021-03-11T04:33:49.000Z
|
2021-04-19T02:37:12.000Z
|
import logging
# Matplotlib is overly verbose by default
logging.getLogger("matplotlib").setLevel(logging.WARNING)
from improv.nexus import Nexus
loadFile = './live_demo.yaml'
nexus = Nexus('Nexus')
nexus.createNexus(file=loadFile)
# All modules needed have been imported
# so we can change the level of logging here
# import logging
# import logging.config
# logging.config.dictConfig({
# 'version': 1,
# 'disable_existing_loggers': True,
# })
# logger = logging.getLogger("improv")
# logger.setLevel(logging.INFO)
nexus.startNexus()
| 24.818182 | 57 | 0.750916 |
9277ab1df95b65643c018ccd3f1b659d4dc12d8d
| 30,246 |
py
|
Python
|
model_deployment/model/grapy/networks/deeplab_xception_synBN.py
|
Pherokung/VIRTUON
|
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
|
[
"MIT"
] | 55 |
2019-11-14T02:32:59.000Z
|
2022-02-04T08:03:15.000Z
|
model_deployment/model/grapy/networks/deeplab_xception_synBN.py
|
Pherokung/VIRTUON
|
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
|
[
"MIT"
] | 7 |
2020-02-27T12:13:21.000Z
|
2021-05-14T00:10:22.000Z
|
model_deployment/model/grapy/networks/deeplab_xception_synBN.py
|
Pherokung/VIRTUON
|
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
|
[
"MIT"
] | 11 |
2020-11-28T04:09:29.000Z
|
2022-03-21T09:00:55.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn.parameter import Parameter
from collections import OrderedDict
from sync_batchnorm import SynchronizedBatchNorm1d, DataParallelWithCallback, SynchronizedBatchNorm2d
def fixed_padding(inputs, kernel_size, rate):
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
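# Example: kernel_size=3, rate=2 -> effective kernel 5, pad_total 4, so two
# rows/columns of zeros are added on every side -- TensorFlow-style 'SAME'
# padding for dilated convolutions.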
class SeparableConv2d_aspp(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
super(SeparableConv2d_aspp, self).__init__()
self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
groups=inplanes, bias=bias)
self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
self.pointwise_bn = SynchronizedBatchNorm2d(planes)
self.relu = nn.ReLU()
def forward(self, x):
# x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
x = self.depthwise(x)
x = self.depthwise_bn(x)
x = self.relu(x)
x = self.pointwise(x)
x = self.pointwise_bn(x)
x = self.relu(x)
return x
class Decoder_module(nn.Module):
def __init__(self, inplanes, planes, rate=1):
super(Decoder_module, self).__init__()
self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3, stride=1, dilation=rate,padding=1)
def forward(self, x):
x = self.atrous_convolution(x)
return x
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate):
super(ASPP_module, self).__init__()
if rate == 1:
            raise RuntimeError('rate == 1 is handled by ASPP_module_rate0')
else:
kernel_size = 3
padding = rate
self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3, stride=1, dilation=rate,
padding=padding)
def forward(self, x):
x = self.atrous_convolution(x)
return x
class ASPP_module_rate0(nn.Module):
def __init__(self, inplanes, planes, rate=1):
super(ASPP_module_rate0, self).__init__()
if rate == 1:
kernel_size = 1
padding = 0
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=rate, bias=False)
self.bn = SynchronizedBatchNorm2d(planes, eps=1e-5, affine=True)
self.relu = nn.ReLU()
else:
            raise RuntimeError('ASPP_module_rate0 only supports rate == 1')
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
class SeparableConv2d_same(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
super(SeparableConv2d_same, self).__init__()
self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
groups=inplanes, bias=bias)
self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
self.pointwise_bn = SynchronizedBatchNorm2d(planes)
def forward(self, x):
x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
x = self.depthwise(x)
x = self.depthwise_bn(x)
x = self.pointwise(x)
x = self.pointwise_bn(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=2, bias=False)
if is_last:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=1, bias=False)
self.skipbn = SynchronizedBatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(planes))
if not start_with_relu:
rep = rep[1:]
if stride != 1:
rep.append(self.relu)
rep.append(SeparableConv2d_same(planes, planes, 3, stride=2,dilation=dilation))
if is_last:
rep.append(self.relu)
rep.append(SeparableConv2d_same(planes, planes, 3, stride=1,dilation=dilation))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
# print(x.size(),skip.size())
x += skip
return x
class Block2(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
super(Block2, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = SynchronizedBatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
# rep.append(nn.BatchNorm2d(planes))
if not start_with_relu:
rep = rep[1:]
if stride != 1:
self.block2_lastconv = nn.Sequential(*[self.relu,SeparableConv2d_same(planes, planes, 3, stride=2,dilation=dilation)])
if is_last:
rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
low_middle = x.clone()
x1 = x
x1 = self.block2_lastconv(x1)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x1 += skip
return x1,low_middle
class Xception(nn.Module):
"""
Modified Alighed Xception
"""
def __init__(self, inplanes=3, os=16, pretrained=False):
super(Xception, self).__init__()
if os == 16:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
elif os == 8:
entry_block3_stride = 1
middle_block_rate = 2
exit_block_rates = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = SynchronizedBatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = SynchronizedBatchNorm2d(64)
self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
self.block2 = Block2(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0],
start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d_aspp(1024, 1536, 3, stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1])
# self.bn3 = nn.BatchNorm2d(1536)
self.conv4 = SeparableConv2d_aspp(1536, 1536, 3, stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1])
# self.bn4 = nn.BatchNorm2d(1536)
self.conv5 = SeparableConv2d_aspp(1536, 2048, 3, stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1])
# self.bn5 = nn.BatchNorm2d(2048)
# Init weights
# self.__init_weight()
# Load pretrained model
if pretrained:
self.__load_xception_pretrained()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# print('conv1 ',x.size())
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
# print('block1',x.size())
# low_level_feat = x
x,low_level_feat = self.block2(x)
# print('block2',x.size())
x = self.block3(x)
# print('xception block3 ',x.size())
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.conv3(x)
# x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
# x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
# x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def __load_xception_pretrained(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('block11'):
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('conv3'):
model_dict[k] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Number of classes: {}".format(n_classes))
print("Output stride: {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
# Atrous Conv
self.xception_features = Xception(nInputChannels, os, pretrained)
# ASPP
if os == 16:
rates = [1, 6, 12, 18]
elif os == 8:
rates = [1, 12, 24, 36]
else:
raise NotImplementedError
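        # ASPP dilation rates follow the DeepLabv3+ paper: [1, 6, 12, 18] at
        # output stride 16, doubled to [1, 12, 24, 36] at output stride 8.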
self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0])
self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
SynchronizedBatchNorm2d(256),
nn.ReLU()
)
self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.concat_projection_bn1 = SynchronizedBatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False)
self.feature_projection_bn1 = SynchronizedBatchNorm2d(48)
self.decoder = nn.Sequential(Decoder_module(304, 256),
Decoder_module(256, 256)
)
self.semantic = nn.Conv2d(256, n_classes, kernel_size=1, stride=1)
def forward(self, input):
x, low_level_features = self.xception_features(input)
# print(x.size())
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.concat_projection_conv1(x)
x = self.concat_projection_bn1(x)
x = self.relu(x)
# print(x.size())
x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
low_level_features = self.feature_projection_conv1(low_level_features)
low_level_features = self.feature_projection_bn1(low_level_features)
low_level_features = self.relu(low_level_features)
# print(low_level_features.size())
# print(x.size())
x = torch.cat((x, low_level_features), dim=1)
x = self.decoder(x)
x = self.semantic(x)
x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.xception_features.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m,SynchronizedBatchNorm2d):
m.eval()
def freeze_aspp_bn(self):
for m in self.aspp1.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp2.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp3.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp4.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def learnable_parameters(self):
layer_features_BN = []
layer_features = []
layer_aspp = []
layer_projection =[]
layer_decoder = []
layer_other = []
model_para = list(self.named_parameters())
for name,para in model_para:
if 'xception' in name:
if 'bn' in name or 'downsample.1.weight' in name or 'downsample.1.bias' in name:
layer_features_BN.append(para)
else:
layer_features.append(para)
# print (name)
elif 'aspp' in name:
layer_aspp.append(para)
elif 'projection' in name:
layer_projection.append(para)
elif 'decode' in name:
layer_decoder.append(para)
else:
layer_other.append(para)
return layer_features_BN,layer_features,layer_aspp,layer_projection,layer_decoder,layer_other
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_state_dict_new(self, state_dict):
own_state = self.state_dict()
        # for name in own_state:
        #     print(name)
new_state_dict = OrderedDict()
for name, param in state_dict.items():
name = name.replace('module.','')
new_state_dict[name] = 0
if name not in own_state:
if 'num_batch' in name:
continue
print ('unexpected key "{}" in state_dict'
.format(name))
continue
# if isinstance(param, own_state):
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
            try:
                own_state[name].copy_(param)
            except Exception:
                print('While copying the parameter named {}, whose dimensions in the model are'
                      ' {} and whose dimensions in the checkpoint are {}.'.format(
                          name, own_state[name].size(), param.size()))
                continue
missing = set(own_state.keys()) - set(new_state_dict.keys())
if len(missing) > 0:
print('missing keys in state_dict: "{}"'.format(missing))
class DeepLabv3_plus_multi_set(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Number of classes: {}".format(n_classes))
print("Output stride: {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus_multi_set, self).__init__()
# Atrous Conv
self.xception_features = Xception(nInputChannels, os, pretrained)
# ASPP
if os == 16:
rates = [1, 6, 12, 18]
elif os == 8:
rates = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0])
self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
SynchronizedBatchNorm2d(256),
nn.ReLU()
)
self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.concat_projection_bn1 = SynchronizedBatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False)
self.feature_projection_bn1 = SynchronizedBatchNorm2d(48)
self.decoder = nn.Sequential(Decoder_module(304, 256),
Decoder_module(256, 256)
)
self.semantic_aux_cihp = nn.Conv2d(256, 20, kernel_size=1, stride=1)
self.semantic_aux_pascal = nn.Conv2d(256, 7, kernel_size=1, stride=1)
self.semantic_aux_atr = nn.Conv2d(256, 18, kernel_size=1, stride=1)
def forward(self, input):
input, input_cate = input
x, low_level_features = self.xception_features(input)
# print(x.size())
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.concat_projection_conv1(x)
x = self.concat_projection_bn1(x)
x = self.relu(x)
# print(x.size())
x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
low_level_features = self.feature_projection_conv1(low_level_features)
low_level_features = self.feature_projection_bn1(low_level_features)
low_level_features = self.relu(low_level_features)
# print(low_level_features.size())
# print(x.size())
x = torch.cat((x, low_level_features), dim=1)
x = self.decoder(x)
# x = self.semantic(x)
# x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
if input_cate == 0:
x = self.semantic_aux_cihp(x)
x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
elif input_cate == 1:
x = self.semantic_aux_pascal(x)
x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
else:
x = self.semantic_aux_atr(x)
x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.xception_features.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m,SynchronizedBatchNorm2d):
m.eval()
def freeze_aspp_bn(self):
for m in self.aspp1.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp2.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp3.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for m in self.aspp4.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def learnable_parameters(self):
layer_features_BN = []
layer_features = []
layer_aspp = []
layer_projection =[]
layer_decoder = []
layer_other = []
model_para = list(self.named_parameters())
for name,para in model_para:
if 'xception' in name:
if 'bn' in name or 'downsample.1.weight' in name or 'downsample.1.bias' in name:
layer_features_BN.append(para)
else:
layer_features.append(para)
# print (name)
elif 'aspp' in name:
layer_aspp.append(para)
elif 'projection' in name:
layer_projection.append(para)
elif 'decode' in name:
layer_decoder.append(para)
else:
layer_other.append(para)
return layer_features_BN,layer_features,layer_aspp,layer_projection,layer_decoder,layer_other
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_state_dict_new(self, state_dict):
own_state = self.state_dict()
        # for name in own_state:
        #     print(name)
new_state_dict = OrderedDict()
for name, param in state_dict.items():
name = name.replace('module.','')
new_state_dict[name] = 0
if name not in own_state:
if 'num_batch' in name:
continue
print ('unexpected key "{}" in state_dict'
.format(name))
continue
# if isinstance(param, own_state):
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
            try:
                own_state[name].copy_(param)
            except Exception:
                print('While copying the parameter named {}, whose dimensions in the model are'
                      ' {} and whose dimensions in the checkpoint are {}.'.format(
                          name, own_state[name].size(), param.size()))
                continue
missing = set(own_state.keys()) - set(new_state_dict.keys())
if len(missing) > 0:
print('missing keys in state_dict: "{}"'.format(missing))
def get_1x_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = [model.xception_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
    # ASPP head, projection convs, decoder and classifier (backbone excluded)
    b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.global_avg_pool,
         model.concat_projection_conv1, model.feature_projection_conv1,
         model.decoder, model.semantic]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
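# A minimal sketch (base_lr is an assumed value) of the two-group optimizer
# these helpers are intended for:
#   optimizer = torch.optim.SGD(
#       [{'params': get_1x_lr_params(model), 'lr': base_lr},
#        {'params': get_10x_lr_params(model), 'lr': 10 * base_lr}],
#       momentum=0.9)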
if __name__ == "__main__":
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True)
model.eval()
# ckt = torch.load('C:\\Users\gaoyi\code_python\deeplab_v3plus.pth')
# model.load_state_dict_new(ckt)
image = torch.randn(1, 3, 512, 512)*255
with torch.no_grad():
output = model.forward(image)
print(output.size())
# print(output)
| 38.628352 | 130 | 0.577795 |
889b0bd9dc34f58c1e4ce23a94d9fc9911eddd7c
| 12,807 |
py
|
Python
|
src/python/grpcio_tests/commands.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 4 |
2020-08-11T10:00:16.000Z
|
2021-10-08T15:17:25.000Z
|
src/python/grpcio_tests/commands.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 54 |
2020-06-23T17:34:04.000Z
|
2022-03-31T02:04:06.000Z
|
src/python/grpcio_tests/commands.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 12 |
2020-07-14T23:59:57.000Z
|
2022-03-22T09:59:18.000Z
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the gRPC Python setup process."""
from distutils import errors as _errors
import glob
import os
import os.path
import platform
import re
import shutil
import sys
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../')
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class CommandError(Exception):
    """Simple exception class for gRPC custom commands."""
class GatherProto(setuptools.Command):
description = 'gather proto dependencies'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# TODO(atash) ensure that we're running from the repository directory when
# this command is used
try:
shutil.rmtree(PROTO_STEM)
except Exception as error:
# We don't care if this command fails
pass
shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
path = os.path.join(root, '__init__.py')
open(path, 'a').close()
class BuildPy(build_py.build_py):
"""Custom project build command."""
def run(self):
try:
self.run_command('build_package_protos')
except CommandError as error:
sys.stderr.write('warning: %s\n' % error)
build_py.build_py.run(self)
class TestLite(setuptools.Command):
"""Command to run tests without fetching or building anything."""
description = 'run tests without fetching or building anything.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
runner = tests.Runner(dedicated_threads=True)
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class TestPy3Only(setuptools.Command):
"""Command to run tests for Python 3+ features.
This does not include asyncio tests, which are housed in a separate
directory.
"""
description = 'run tests for py3+ features'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests_py3_only'])
runner = tests.Runner()
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class TestAio(setuptools.Command):
"""Command to run aio tests without fetching or building anything."""
description = 'run aio tests without fetching or building anything.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._add_eggs_to_path()
import tests
loader = tests.Loader()
loader.loadTestsFromNames(['tests_aio'])
# Even without dedicated threads, the framework will somehow spawn a
# new thread for tests to run upon. New thread doesn't have event loop
# attached by default, so initialization is needed.
runner = tests.Runner(dedicated_threads=False)
result = runner.run(loader.suite)
if not result.wasSuccessful():
sys.exit('Test failure')
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class TestGevent(setuptools.Command):
"""Command to run tests w/gevent."""
BANNED_TESTS = (
# Fork support is not compatible with gevent
'fork._fork_interop_test.ForkInteropTest',
# These tests send a lot of RPCs and are really slow on gevent. They will
# eventually succeed, but need to dig into performance issues.
'unit._cython._no_messages_server_completion_queue_per_call_test.Test.test_rpcs',
'unit._cython._no_messages_single_server_completion_queue_test.Test.test_rpcs',
'unit._compression_test',
# TODO(https://github.com/grpc/grpc/issues/16890) enable this test
'unit._cython._channel_test.ChannelTest.test_multiple_channels_lonely_connectivity',
# I have no idea why this doesn't work in gevent, but it shouldn't even be
# using the c-core
'testing._client_test.ClientTest.test_infinite_request_stream_real_time',
# TODO(https://github.com/grpc/grpc/issues/15743) enable this test
'unit._session_cache_test.SSLSessionCacheTest.testSSLSessionCacheLRU',
# TODO(https://github.com/grpc/grpc/issues/14789) enable this test
'unit._server_ssl_cert_config_test',
# TODO(https://github.com/grpc/grpc/issues/14901) enable this test
'protoc_plugin._python_plugin_test.PythonPluginTest',
'protoc_plugin._python_plugin_test.SimpleStubsPluginTest',
# Beta API is unsupported for gevent
'protoc_plugin.beta_python_plugin_test',
'unit.beta._beta_features_test',
# TODO(https://github.com/grpc/grpc/issues/15411) unpin gevent version
# This test gets stuck when running with a higher version of gevent
'unit._auth_context_test.AuthContextTest.testSessionResumption',
# TODO(https://github.com/grpc/grpc/issues/15411) enable these tests
'unit._channel_ready_future_test.ChannelReadyFutureTest.test_immediately_connectable_channel_connectivity',
"unit._cython._channel_test.ChannelTest.test_single_channel_lonely_connectivity",
'unit._exit_test.ExitTest.test_in_flight_unary_unary_call',
'unit._exit_test.ExitTest.test_in_flight_unary_stream_call',
'unit._exit_test.ExitTest.test_in_flight_stream_unary_call',
'unit._exit_test.ExitTest.test_in_flight_stream_stream_call',
'unit._exit_test.ExitTest.test_in_flight_partial_unary_stream_call',
'unit._exit_test.ExitTest.test_in_flight_partial_stream_unary_call',
'unit._exit_test.ExitTest.test_in_flight_partial_stream_stream_call',
# TODO(https://github.com/grpc/grpc/issues/18980): Reenable.
'unit._signal_handling_test.SignalHandlingTest',
'unit._metadata_flags_test',
'health_check._health_servicer_test.HealthServicerTest.test_cancelled_watch_removed_from_watch_list',
# TODO(https://github.com/grpc/grpc/issues/17330) enable these three tests
'channelz._channelz_servicer_test.ChannelzServicerTest.test_many_subchannels',
'channelz._channelz_servicer_test.ChannelzServicerTest.test_many_subchannels_and_sockets',
'channelz._channelz_servicer_test.ChannelzServicerTest.test_streaming_rpc',
# TODO(https://github.com/grpc/grpc/issues/15411) enable this test
'unit._cython._channel_test.ChannelTest.test_negative_deadline_connectivity',
# TODO(https://github.com/grpc/grpc/issues/15411) enable this test
'unit._local_credentials_test.LocalCredentialsTest',
# TODO(https://github.com/grpc/grpc/issues/22020) LocalCredentials
# aren't supported with custom io managers.
'unit._contextvars_propagation_test',
'testing._time_test.StrictRealTimeTest',
)
BANNED_WINDOWS_TESTS = (
# TODO(https://github.com/grpc/grpc/pull/15411) enable this test
'unit._dns_resolver_test.DNSResolverTest.test_connect_loopback',
# TODO(https://github.com/grpc/grpc/pull/15411) enable this test
'unit._server_test.ServerTest.test_failed_port_binding_exception',
)
description = 'run tests with gevent. Assumes grpc/gevent are installed'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
from gevent import monkey
monkey.patch_all()
import tests
import grpc.experimental.gevent
grpc.experimental.gevent.init_gevent()
import gevent
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
runner = tests.Runner()
if sys.platform == 'win32':
runner.skip_tests(self.BANNED_TESTS + self.BANNED_WINDOWS_TESTS)
else:
runner.skip_tests(self.BANNED_TESTS)
result = gevent.spawn(runner.run, loader.suite)
result.join()
if not result.value.wasSuccessful():
sys.exit('Test failure')
class RunInterop(test.test):
description = 'run interop test client/server'
user_options = [
('args=', None, 'pass-thru arguments for the client/server'),
('client', None, 'flag indicating to run the client'),
('server', None, 'flag indicating to run the server'),
('use-asyncio', None, 'flag indicating to run the asyncio stack')
]
def initialize_options(self):
self.args = ''
self.client = False
self.server = False
self.use_asyncio = False
def finalize_options(self):
if self.client and self.server:
raise _errors.DistutilsOptionError(
'you may only specify one of client or server')
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.client:
self.run_client()
elif self.server:
self.run_server()
def run_server(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
if self.use_asyncio:
import asyncio
from tests_aio.interop import server
sys.argv[1:] = self.args.split()
asyncio.get_event_loop().run_until_complete(server.serve())
else:
from tests.interop import server
sys.argv[1:] = self.args.split()
server.serve()
def run_client(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import client
sys.argv[1:] = self.args.split()
client.test_interoperability()
class RunFork(test.test):
description = 'run fork test client'
user_options = [('args=', 'a', 'pass-thru arguments for the client')]
def initialize_options(self):
self.args = ''
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.fork import client
sys.argv[1:] = self.args.split()
client.test_fork()
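# Usage sketch: these classes are setuptools commands, so they are invoked by
# name through setup.py once registered in its `cmdclass` mapping. The command
# names below are assumptions for illustration; the real registrations live in
# setup.py, which is not part of this file.
#
#   python setup.py test_lite
#   python setup.py test_aio
#   python setup.py run_interop --server --args="--port=8080"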
| 37.121739 | 115 | 0.687046 |
21f535ec1e4ffe9a4d767165a107778b9afb582e | 235 | py | Python | homeassistant/components/brother/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/brother/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/brother/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z |
"""Constants for Brother integration."""
from __future__ import annotations
from typing import Final
DATA_CONFIG_ENTRY: Final = "config_entry"
DOMAIN: Final = "brother"
PRINTER_TYPES: Final = ["laser", "ink"]
SNMP: Final = "snmp"
| 18.076923 | 41 | 0.73617 |
c8ff9d323fa639776c7b84efc969b747b68b2f41 | 2,373 | py | Python | Backend/getplate/read_plate.py | skwasiborski/Stroller | 78bf15beb6ae55aa2fa39446139353e15154d337 | ["MIT"] | null | null | null | Backend/getplate/read_plate.py | skwasiborski/Stroller | 78bf15beb6ae55aa2fa39446139353e15154d337 | ["MIT"] | null | null | null | Backend/getplate/read_plate.py | skwasiborski/Stroller | 78bf15beb6ae55aa2fa39446139353e15154d337 | ["MIT"] | null | null | null |
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from local_utils import detect_lp
from os.path import splitext, basename
from keras.models import model_from_json
import glob
import matplotlib.gridspec as gridspec
from sklearn.preprocessing import LabelEncoder
from helper import *
#img_path='Plate_detect_and_recognize/Plate_examples\\16-204476.jpg'
def read_plate(img_path, plate_model):
LpImg=[]
cor=[]
L=[]
binary=[]
respath='ERROR'
try:
LpImg,cor,L = get_plate(img_path, wpod_net = plate_model)
print('--------------------')
print("Detect %i plate(s) in"%len(LpImg),os.path.split(img_path)[1])
#print("Coordinate of plate(s) in image: \n", cor)
print("Max Probability of a plate [0]: ", L[0])
# plt.title('Probability of this licence plate : '+str(L[0]))
# plt.axis(False)
# plt.imshow(LpImg[0])
# resname=os.path.split(img_path)[1]
# resname=os.path.splitext(resname)[0]
# respath=os.path.join('results','Extracted','EXTRACTED_'+resname+'PNG')
# print('----------------------')
# print(' saving '+ respath)
# plt.savefig(respath, dpi=400)
# plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))
# # convert to grayscale and blur the image
# gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(gray,(7,7),0)
# # Applied inversed thresh_binary
# binary = cv2.threshold(blur, 180, 255,
# cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
# plt.imshow(binary,cmap="gray")
# resname=os.path.split(img_path)[1]
# resname=os.path.splitext(resname)[0]
# respath=os.path.join('results','Binary','BINARY_'+resname)
# print('----------------------')
# print(' saving '+ respath)
# plt.savefig(respath, dpi=400)
except Exception:
    pass  # detection failed; return the empty defaults below
return LpImg, cor, L
# model_path=os.path.join('Plate_detect_and_recognize',"wpod-net.json")
# wpod_net = load_model(model_path)
# LpImg, cor, L =read_plate(img_path,wpod_net)
| 33.422535 | 81 | 0.59587 |
0663ae571c913ec173c0d8dcf8af4d087404a3e2 | 9,591 | py | Python | tensorflow_hub/keras_layer.py | Jabrils/hub | 84ac11ac756050a186cc8bddb54e104323fb9dff | ["Apache-2.0"] | 1 | 2020-07-12T06:36:06.000Z | 2020-07-12T06:36:06.000Z | tensorflow_hub/keras_layer.py | Jabrils/hub | 84ac11ac756050a186cc8bddb54e104323fb9dff | ["Apache-2.0"] | null | null | null | tensorflow_hub/keras_layer.py | Jabrils/hub | 84ac11ac756050a186cc8bddb54e104323fb9dff | ["Apache-2.0"] | null | null | null |
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Keras Layer for using TF Hub modules in TF2 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import six
import tensorflow as tf
from tensorflow_hub import module_v2
# ATTENTION: This file uses private imports from TF2.
# __init__ may not import this file if tensorflow is too old.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import smart_cond
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.util import tf_inspect
# pylint: enable=g-direct-tensorflow-import
class KerasLayer(tf.keras.layers.Layer):
"""Wraps a Hub module (or a similar callable) for TF2 as a Keras Layer.
This layer wraps a callable object for use as a Keras layer. The callable
object can be passed directly, or be specified by a Python string with a
handle that gets passed to `hub.load()`.
This is the preferred API to load a TF2-style SavedModel from TF Hub
into a Keras model. Calling this function requires TF 1.14 or newer.
It can be called both in eager and graph mode.
The callable object is expected to follow the conventions detailed below.
(These are met by TF2-compatible modules loaded from TensorFlow Hub.)
The callable is invoked with a single positional argument set to one tensor
or a list of tensors containing the inputs to the layer. If the callable
accepts a `training` argument, a Python boolean is passed for it. It is True
if this layer is marked trainable *and* called for training.
If present, the following attributes of callable are understood to have
special meanings:
variables: a list of all tf.Variable objects that the callable depends on.
trainable_variables: those elements of `variables` that are reported
as trainable variables of this Keras Layer when the layer is trainable.
regularization_losses: a list of callables to be added as losses of this
Keras Layer when the layer is trainable. Each one must accept zero
arguments and return a scalar tensor.
Note: to work-around missing shape inference functionalities from functions
created from FunctionDefs, in many cases one has to pass an 'output_shape'
and potentially 'input_shape' and 'dtype'. E.g. the following is a typical
work-around:
```
hub.KerasLayer(
"/tmp/text_embedding_model",
output_shape=[20], # Outputs a tensor with shape [batch_size, 20].
input_shape=[], # Expects a tensor of shape [batch_size] as input.
dtype=tf.string) # Expects a tf.string input tensor.
```
Note: This layer can be used inside the model_fn of a TF2 Estimator. See the
[migration guide](https://www.tensorflow.org/beta/guide/migration_guide#using_a_custom_model_fn)
for guidance on how to pick up trainable variables, losses and updates
explicitly from Keras objects instead of relying on graph collections.
This layer class does not support graph collections.
Args:
handle: a callable object (subject to the conventions above), or a
Python string for which hub.load() returns such a callable.
A string is required to save the Keras config of this Layer.
trainable: Boolean controlling whether this layer is trainable.
arguments: optionally, a dict with additional keyword arguments passed
to the callable. These must be JSON-serializable to save the Keras config
of this layer.
**kwargs: 'output_shape': A tuple with the (possibly partial) output
shape of the callable *without* leading batch size. Other arguments
are passed into the Layer constructor.
"""
def __init__(self, handle, trainable=False, arguments=None, **kwargs):
# Note: for compatibility with keras-model serialization this layer is
# json-serializable. If you add or change arguments here, please also update
# the `get_config` method.
self._handle = handle
# Resolve the handle to a callable `func`.
# NOTE: The name _func gets baked into object-based checkpoints.
if callable(handle):
self._func = handle
else:
self._func = module_v2.load(handle)
if not callable(self._func):
raise ValueError("Non-callable result from hub.load('%s')" %
str(handle))
# TODO(b/124219898): We should do shape inference on the callable.
if "output_shape" in kwargs:
self._output_shape = tuple(kwargs.pop("output_shape"))
# Initialize an empty layer, then add_weight() etc. as needed.
super(KerasLayer, self).__init__(trainable=trainable, **kwargs)
# Add trainable and non-trainable weights from the callable.
if hasattr(self._func, "trainable_variables"):
for v in self._func.trainable_variables:
self._add_existing_weight(v, trainable=True)
trainable_variables = {id(v) for v in self._func.trainable_variables}
else:
trainable_variables = set()
if hasattr(self._func, "variables"):
for v in self._func.variables:
if id(v) not in trainable_variables:
self._add_existing_weight(v, trainable=False)
# Forward the callable's regularization losses (if any).
if hasattr(self._func, "regularization_losses"):
for l in self._func.regularization_losses:
if not callable(l):
raise ValueError(
"hub.KerasLayer(obj) expects obj.regularization_losses to be an "
"iterable of callables, each returning a scalar loss term.")
self.add_loss(self._call_loss_if_trainable(l)) # Supports callables.
# Prepare to call `func`.
self._func_fullargspec = tf_inspect.getfullargspec(self._func.__call__)
self._func_wants_training = (
"training" in self._func_fullargspec.args or
"training" in self._func_fullargspec.kwonlyargs)
if arguments is not None:
self._arguments = arguments
def _add_existing_weight(self, weight, trainable=None):
"""Calls add_weight() to register but not create an existing weight."""
if trainable is None: trainable = weight.trainable
self.add_weight(name=weight.name, shape=weight.shape, dtype=weight.dtype,
trainable=trainable, getter=lambda *_, **__: weight)
def _call_loss_if_trainable(self, loss):
"""Returns `loss` conditioned on whether this layer is trainable."""
return lambda: loss() if self.trainable else 0.
def call(self, inputs, training=None):
# We basically want to call this...
kwargs = getattr(self, "_arguments", None)
if kwargs is None:
kwargs = {}
f = functools.partial(self._func, inputs, **kwargs)
# ...but we may also have to pass a Python boolean for `training`, which
# is the logical "and" of this layer's trainability and what the surrounding
# model is doing (analogous to tf.keras.layers.BatchNormalization in TF2).
# For the latter, we have to look in two places: the `training` argument,
# or else Keras' global `learning_phase`, which might actually be a tensor.
if not self._func_wants_training:
result = f()
else:
if self.trainable:
if training is None:
training = tf.keras.backend.learning_phase()
else:
training = False
result = smart_cond.smart_cond(training,
lambda: f(training=True),
lambda: f(training=False))
# TODO(b/124219898): Polymorphic function should return shaped tensor.
if hasattr(self, "_output_shape"):
result.set_shape((inputs.shape[0],) + self._output_shape)
return result
def get_config(self):
config = super(KerasLayer, self).get_config()
if not isinstance(self._handle, six.string_types):
# Need to raise this type in order for tf.saved_model.save() to fall back
# to not using config, instead of crashing.
# TODO(b/134528831): Reconsider the usability implications.
raise NotImplementedError(
"Can only generate a valid config for `hub.KerasLayer(handle, ...)`"
"that uses a string `handle`.\n\n"
"Got `type(handle)`: {}".format(type(self._handle)))
config.update({
"handle": self._handle,
})
if hasattr(self, "_output_shape"):
config["output_shape"] = self._output_shape
if hasattr(self, "_arguments"):
# Raise clear errors for non-serializable arguments.
for key, value in self._arguments.items():
try:
json.dumps(value)
except TypeError as e:
raise ValueError(
"`hub.KerasLayer(..., arguments)` contains non json-serializable"
"values in key: {}".format(key))
config["arguments"] = self._arguments
return config
@property
def resolved_object(self):
"""Returns the callable object to which `handle` resolved in `__init__`."""
return self._func
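if __name__ == "__main__":
    # Minimal usage sketch, not part of the library API: instead of a hub
    # handle string, wrap a local callable that follows the conventions in the
    # class docstring (a `trainable_variables` attribute and one positional
    # tensor argument), so the example runs without downloading a module.
    class _Scale(tf.Module):
        """Toy callable that multiplies its input by a trainable scalar."""

        def __init__(self):
            super(_Scale, self).__init__()
            self.w = tf.Variable(2.0)

        def __call__(self, x):
            return x * self.w

    layer = KerasLayer(_Scale(), trainable=True)
    print(layer(tf.constant([[1.0, 2.0]])))  # -> [[2. 4.]]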
| 43.39819 | 98 | 0.701178 |
d9f2ecbca370dedc98136f53d5c271738a65ebea | 8,544 | py | Python | test/test_envelopes.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | ["Apache-2.0"] | null | null | null | test/test_envelopes.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | ["Apache-2.0"] | 2 | 2020-10-28T21:46:21.000Z | 2020-10-30T17:25:01.000Z | test/test_envelopes.py | keefehuang/c3 | 1df65bc43d891c7af77c8eff0ef8879b2fe5c3e6 | ["Apache-2.0"] | 1 | 2021-06-01T17:04:31.000Z | 2021-06-01T17:04:31.000Z |
import pickle
from c3.c3objs import Quantity
from c3.libraries.envelopes import envelopes
import numpy as np
import pytest
ts = np.linspace(0, 10e-9, 100)
with open("test/envelopes.pickle", "rb") as filename:
test_data = pickle.load(filename)
ABS_TOL_FACTOR = 1e-11
def get_atol(test_type: str) -> float:
"""Get the absolute tolerance corresponding to a specific test data
Parameters
----------
test_type : str
String representing the test type to be used as a key in the test_data dict
Returns
-------
float
Absolute tolerance for the desired value of this test type
"""
return ABS_TOL_FACTOR * np.max(test_data[test_type])
@pytest.mark.unit
def test_pwc_shape():
params = {
"t_bin_start": Quantity(1e-10),
"t_bin_end": Quantity(9.9e-9),
"t_final": Quantity(10e-9),
"inphase": Quantity([0, 0.1, 0.3, 0.5, 0.1, 1.1, 0.4, 0.1]),
}
np.testing.assert_allclose(
actual=envelopes["pwc_shape"](t=ts, params=params),
desired=test_data["pwc_shape"],
atol=get_atol("pwc_shape"),
)
np.testing.assert_allclose(
actual=envelopes["pwc_symmetric"](t=ts, params=params),
desired=test_data["pwc_symmetric"],
atol=get_atol("pwc_symmetric"),
)
np.testing.assert_allclose(
actual=envelopes["pwc_shape_plateau"](t=ts, params=params),
desired=test_data["pwc_shape_plateau1"],
atol=get_atol("pwc_shape_plateau1"),
)
params["width"] = Quantity(5e-9)
np.testing.assert_allclose(
actual=envelopes["pwc_shape_plateau"](t=ts, params=params),
desired=test_data["pwc_shape_plateau2"],
atol=get_atol("pwc_shape_plateau2"),
)
@pytest.mark.unit
def test_delta_pulse():
params = {
"t_sig": Quantity(
[
0.5e-9,
]
),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["delta_pulse"](t=ts, params=params),
desired=test_data["delta_pulse"],
atol=get_atol("delta_pulse"),
)
@pytest.mark.unit
def test_fourier():
params = {
"amps": Quantity([0.5, 0.2]),
"freqs": Quantity([1e6, 1e10]),
"phases": Quantity([0, 1]),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["fourier_sin"](t=ts, params=params),
desired=test_data["fourier_sin"],
atol=get_atol("fourier_sin"),
)
np.testing.assert_allclose(
actual=envelopes["fourier_cos"](t=ts, params=params),
desired=test_data["fourier_cos"],
atol=get_atol("fourier_cos"),
)
params = {
"width": Quantity(9e-9),
"fourier_coeffs": Quantity([1, 0.5, 0.2]),
"offset": Quantity(0.1),
"amp": Quantity(0.5),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["slepian_fourier"](t=ts, params=params),
desired=test_data["slepian_fourier"],
atol=get_atol("slepian_fourier"),
)
params["risefall"] = Quantity(4e-9)
np.testing.assert_allclose(
actual=envelopes["slepian_fourier"](t=ts, params=params),
desired=test_data["slepian_fourier_risefall"],
atol=get_atol("slepian_fourier_risefall"),
)
params["sin_coeffs"] = Quantity([0.3])
np.testing.assert_allclose(
actual=envelopes["slepian_fourier"](t=ts, params=params),
desired=test_data["slepian_fourier_sin"],
atol=get_atol("slepian_fourier_sin"),
)
@pytest.mark.unit
def test_flattop():
params = {
"risefall": Quantity(2e-9),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["trapezoid"](t=ts, params=params),
desired=test_data["trapezoid"],
atol=get_atol("trapezoid"),
)
np.testing.assert_allclose(
actual=envelopes["flattop_risefall"](t=ts, params=params),
desired=test_data["flattop_risefall"],
atol=get_atol("flattop_risefall"),
)
np.testing.assert_allclose(
actual=envelopes["flattop_risefall_1ns"](t=ts, params=params),
desired=test_data["flattop_risefall_1ns"],
atol=get_atol("flattop_risefall_1ns"),
)
params = {
"ramp": Quantity(2e-9),
"t_up": Quantity(1e-9),
"t_down": Quantity(10e-9),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["flattop_variant"](t=ts, params=params),
desired=test_data["flattop_variant"],
atol=get_atol("flattop_variant"),
)
params = {
"risefall": Quantity(2e-9),
"t_up": Quantity(1e-9),
"t_down": Quantity(10e-9),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["flattop"](t=ts, params=params), desired=test_data["flattop"]
)
@pytest.mark.unit
def test_flattop_cut():
params = {
"risefall": Quantity(2e-9),
"t_up": Quantity(1e-9),
"t_down": Quantity(10e-9),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["flattop_cut"](t=ts, params=params),
desired=test_data["flattop_cut"],
atol=get_atol("flattop_cut"),
)
params = {
"risefall": Quantity(2e-9),
"width": Quantity(9e-9),
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["flattop_cut_center"](t=ts, params=params),
desired=test_data["flattop_cut_center"],
atol=get_atol("flattop_cut_center"),
)
@pytest.mark.unit
def test_gaussian():
params = {
"t_final": Quantity(10e-9),
"sigma": Quantity(5e-9),
}
np.testing.assert_allclose(
actual=envelopes["gaussian_sigma"](t=ts, params=params),
desired=test_data["gaussian_sigma"],
atol=get_atol("gaussian_sigma"),
)
np.testing.assert_allclose(
actual=envelopes["gaussian"](t=ts, params=params),
desired=test_data["gaussian"],
atol=get_atol("gaussian"),
)
np.testing.assert_allclose(
actual=envelopes["gaussian_nonorm"](t=ts, params=params),
desired=test_data["gaussian_nonorm"],
atol=get_atol("gaussian_nonorm"),
)
np.testing.assert_allclose(
actual=envelopes["gaussian_der_nonorm"](t=ts, params=params),
desired=test_data["gaussian_der_nonorm"],
atol=get_atol("gaussian_der_nonorm"),
)
np.testing.assert_allclose(
actual=envelopes["gaussian_der"](t=ts, params=params),
desired=test_data["gaussian_der"],
atol=get_atol("gaussian_der"),
)
np.testing.assert_allclose(
actual=envelopes["drag_sigma"](t=ts, params=params),
desired=test_data["drag_sigma"],
atol=get_atol("drag_sigma"),
)
np.testing.assert_allclose(
actual=envelopes["drag_der"](t=ts, params=params),
desired=test_data["drag_der"],
atol=get_atol("drag_der"),
)
np.testing.assert_allclose(
actual=envelopes["drag"](t=ts, params=params),
desired=test_data["drag"],
atol=get_atol("drag"),
)
@pytest.mark.unit
def test_cosine():
params = {
"t_final": Quantity(10e-9),
}
np.testing.assert_allclose(
actual=envelopes["cosine"](t=ts, params=params),
desired=test_data["cosine"],
atol=get_atol("cosine"),
)
params = {
"t_final": Quantity(10e-9),
"t_rise": Quantity(2e-9),
}
np.testing.assert_allclose(
actual=np.reshape(
envelopes["cosine_flattop"](t=np.reshape(ts, (-1, 1)), params=params), (-1,)
),
desired=test_data["cosine_flattop"],
atol=get_atol("cosine_flattop"),
)
# Nico: to be consistent with the signal generation code, the reshapes above are necessary. Somewhere in the
# masking the time vector gets an additional dimension. It all works fine since it's elementwise, but the
# flattop implementation has a concat in it that is strict about shapes. This should be investigated.
@pytest.mark.unit
def test_nodrive():
params = {}
np.testing.assert_allclose(
actual=envelopes["no_drive"](t=ts, params=params),
desired=test_data["no_drive"],
atol=get_atol("no_drive"),
)
np.testing.assert_allclose(
actual=envelopes["rect"](t=ts, params=params),
desired=test_data["rect"],
atol=get_atol("rect"),
)
| 27.56129 | 113 | 0.617158 |
8e35b375550915c15c3e17f0ca932b8175c87c98 | 3,849 | py | Python | lib/surface/compute/backend_services/add_signed_url_key.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/compute/backend_services/add_signed_url_key.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | null | null | null | lib/surface/compute/backend_services/add_signed_url_key.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to add a Cloud CDN Signed URL key to a backend service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import signed_url_flags
from googlecloudsdk.command_lib.compute.backend_services import flags
from googlecloudsdk.core.util import files
class AddSignedUrlKey(base.UpdateCommand):
"""Add Cloud CDN Signed URL key to a backend service.
*{command}* is used to add a new Cloud CDN Signed URL key to a backend
service.
Cloud CDN Signed URLs give you a way to serve responses from the
globally distributed CDN cache, even if the request needs to be
authorized.
Signed URLs are a mechanism to temporarily give a client access to a
private resource without requiring additional authorization. To achieve
this, the full request URL that should be allowed is hashed
and cryptographically signed. By using the signed URL you give it, that
one request will be considered authorized to receive the requested
content.
Generally, a signed URL can be used by anyone who has it. However, it
is usually only intended to be used by the client that was directly
given the URL. To mitigate this, they expire at a time chosen by the
issuer. To minimize the risk of a signed URL being shared, it is recommended
that the signed URL be set to expire as soon as possible.
A 128-bit secret key is used for signing the URLs.
"""
@staticmethod
def Args(parser):
"""Set up arguments for this command."""
flags.GLOBAL_BACKEND_SERVICE_ARG.AddArgument(parser)
signed_url_flags.AddCdnSignedUrlKeyName(parser, required=True)
signed_url_flags.AddCdnSignedUrlKeyFile(parser, required=True)
def Run(self, args):
"""Issues the request to add Signed URL key to the backend bucket."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
api_client = holder.client.apitools_client
messages = holder.client.messages
service = api_client.backendServices
backend_service_ref = flags.GLOBAL_BACKEND_SERVICE_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
key_value = files.ReadFileContents(args.key_file).rstrip()
request = messages.ComputeBackendServicesAddSignedUrlKeyRequest(
project=backend_service_ref.project,
backendService=backend_service_ref.Name(),
signedUrlKey=messages.SignedUrlKey(
keyName=args.key_name, keyValue=key_value))
operation = service.AddSignedUrlKey(request)
operation_ref = holder.resources.Parse(
operation.selfLink, collection='compute.globalOperations')
operation_poller = poller.Poller(service)
return waiter.WaitFor(operation_poller, operation_ref,
'Adding Cloud CDN Signed URL key to [{0}]'.format(
backend_service_ref.Name()))
| 42.766667 | 78 | 0.762796 |
d7adcfb877318ad9d29cd3230d140eab98e7fdfd | 788 | py | Python | field_permissions/forms.py | brunopastor/django-field-permissions | 5e52c8b1984ca1e72ae6461e42064d783f1d0f3b | ["MIT"] | 26 | 2016-09-23T12:46:10.000Z | 2021-12-03T11:36:34.000Z | field_permissions/forms.py | brunopastor/django-field-permissions | 5e52c8b1984ca1e72ae6461e42064d783f1d0f3b | ["MIT"] | 2 | 2017-11-01T19:29:41.000Z | 2018-10-10T13:33:23.000Z | field_permissions/forms.py | tiliv/django-field-permissions | 4536037247c8a04ccb34ed9d5fa5b0d866286fbd | ["MIT"] | 16 | 2017-07-17T15:33:17.000Z | 2021-11-22T13:14:35.000Z |
from django import forms
class FieldPermissionFormMixin:
"""
ModelForm logic for removing fields when a user is found not to have change permissions.
"""
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
super(FieldPermissionFormMixin, self).__init__(*args, **kwargs)
model = self.Meta.model
model_field_names = [f.name for f in model._meta.get_fields()] # this might be too broad
for name in model_field_names:
if name in self.fields and not self.instance.has_field_perm(user, field=name):
self.remove_unauthorized_field(name)
def remove_unauthorized_field(self, name):
del self.fields[name]
class FieldPermissionForm(FieldPermissionFormMixin, forms.ModelForm):
pass
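# Usage sketch (assumptions: an `Article` model whose instances implement
# `has_field_perm(user, field=...)`, which is what the mixin calls above;
# the model and form names are illustrative, not part of this package):
#
# class ArticleForm(FieldPermissionForm):
#     class Meta:
#         model = Article
#         fields = ['title', 'body', 'status']
#
# form = ArticleForm(instance=article, user=request.user)
# # fields the user cannot change are removed before rendering/validation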
| 32.833333 | 97 | 0.687817 |
23b3574af032d07d4a9e7e00e719ae1163524bed | 6,958 | py | Python | models/system_model_v3/model/parts/eth_leveragers.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | ["MIT"] | 7 | 2021-08-31T13:11:51.000Z | 2022-02-10T09:05:16.000Z | models/system_model_v3/model/parts/eth_leveragers.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | ["MIT"] | null | null | null | models/system_model_v3/model/parts/eth_leveragers.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | ["MIT"] | 8 | 2021-09-03T08:29:09.000Z | 2021-12-04T04:20:49.000Z |
import pandas as pd
from .utils import target_rate_to_apy
from .debt_market import is_cdp_above_liquidation_ratio
from .debt_market import wipe_to_rr_apy, draw_to_rr_apy
from .uniswap import get_output_price, get_input_price
"""
A DEFI saver kind of an agent.
The agent keeps a CDP open and keeps its liquidation ratio between
"eth_leverager_target_min_liquidity_ratio" and "eth_leverager_target_max_liquidity_ratio".
When the liquidation ratio goes out of these predefined ranges, the agent pushes the
liquidation to approximately to an average of these numbers
The purpose is to keep constant leverage on ETH. This agent believes that ETH is going
up over time (relative to RAI).
"""
def p_leverage_eth(params, substep, state_history, state):
debug = params['debug']
uniswap_state_delta = {
'RAI_delta': 0,
'ETH_delta': 0,
'UNI_delta': 0,
}
cdps = state['cdps'].copy()
if state["timestep"] == 1:
if debug:
print("Initializing ETH Leverager")
new_cdp = [{
'open': 1, # Is the CDP open or closed? True/False == 1/0 for integer/float series
'arbitrage': 0,
'time': 0, # How long the CDP has been open for
'locked': state['eth_leverager_eth_balance'],
'drawn': state['eth_leverager_rai_balance'],
'wiped': 0.0, # Principal debt wiped
'freed': 0.0, # ETH collateral freed
'w_wiped': 0.0, # Accrued interest wiped
'v_bitten': 0.0, # ETH collateral bitten (liquidated)
'u_bitten': 0.0, # Principal debt bitten
'w_bitten': 0.0, # Accrued interest bitten
'dripped': 0.0, # Total interest accrued
'owner': 'leverager' #specifies which agent code controls the cdp
}]
new_cdp = pd.DataFrame(new_cdp)
cdps = pd.concat((cdps, new_cdp), ignore_index=True)
return {"cdps": cdps, **uniswap_state_delta}
eth_price = state["eth_price"]
target_price = state["target_price"]
RAI_balance = state['RAI_balance']
ETH_balance = state['ETH_balance']
uniswap_fee = params['uniswap_fee']
rr_apy = target_rate_to_apy(state['target_rate'])
#operate only cdps that are managed by this agent
for index, cdp_at_start in cdps.query("open == 1").query("owner == 'leverager'").iterrows():
RAI_delta = 0
ETH_delta = 0
#perform actions on the SAFE only if we are above or below the threshold rates
above_min = is_cdp_above_liquidation_ratio(cdps.loc[index], eth_price, target_price,
params["eth_leverager_target_min_liquidity_ratio"])
above_max = is_cdp_above_liquidation_ratio(cdps.loc[index], eth_price, target_price,
params["eth_leverager_target_max_liquidity_ratio"])
if not above_min or above_max:
#calculate how we need to change the cdp to get the liquidation ratio to the preferred rate
preferred_ratio = (params["eth_leverager_target_min_liquidity_ratio"] + \
params["eth_leverager_target_max_liquidity_ratio"])/2
drawn_total = cdps.at[index, "drawn"] - cdps.at[index, "wiped"] - cdps.at[index, "u_bitten"]
locked_total = cdps.at[index, "locked"] - cdps.at[index, "freed"] - cdps.at[index, "v_bitten"]
p_uniswap = RAI_balance / ETH_balance
d_locked = (preferred_ratio * state['target_price'] * drawn_total - locked_total * state['eth_price']) \
/ (state['eth_price'] - preferred_ratio * state['target_price'] * p_uniswap)
d_drawn = p_uniswap * d_locked
cdp_above_liquidation_buffer = is_cdp_above_liquidation_ratio(cdps.loc[index], eth_price,
target_price, preferred_ratio)
if not cdp_above_liquidation_buffer and rr_apy > params['min_redemption_rate']:
# too low liquidation ratio, pump it higher
# unlock ETH, sell ETH for RAI, wipe debt
RAI_delta, ETH_delta = get_output_price(d_locked, RAI_balance, ETH_balance, uniswap_fee)
if params['min_redemption_rate'] <= -100 or params['kp'] == 0:
wiped = -RAI_delta
freed = ETH_delta
else:
wipe_apy = wipe_to_rr_apy(params['min_redemption_rate'], ETH_balance, RAI_balance, eth_price, state, params)
wiped = min(-RAI_delta, wipe_apy)
RAI_delta, ETH_delta = get_input_price(-wiped, RAI_balance, ETH_balance, uniswap_fee)
freed = ETH_delta
assert d_locked <= 0 # - ETH_delta
# Make sure that no balance goes negative and then perform the swap if possible.
# The swaps can go negative if uniswap lacks liquidity
if freed <= locked_total and wiped <= drawn_total and wiped < RAI_balance:
cdps.at[index, "freed"] = cdps.at[index, "freed"] + freed
cdps.at[index, "wiped"] = cdps.at[index, "wiped"] + wiped
# update uniswap
uniswap_state_delta['ETH_delta'] += freed
uniswap_state_delta['RAI_delta'] -= wiped
ETH_balance += freed
RAI_balance -= wiped
elif cdp_above_liquidation_buffer and rr_apy < params['max_redemption_rate']:
# too high liquidation ratio, dump it lower
# draw debt, sell RAI for ETH, lock ETH
RAI_delta, ETH_delta = get_input_price(d_drawn, RAI_balance, ETH_balance, uniswap_fee)
if params['max_redemption_rate'] == float("inf") or params['kp'] == 0:
drawn = RAI_delta
locked = -ETH_delta
else:
draw_apy = draw_to_rr_apy(params['max_redemption_rate'], ETH_balance, RAI_balance, eth_price, state, params)
drawn = min(RAI_delta, draw_apy)
RAI_delta, ETH_delta = get_input_price(drawn, RAI_balance, ETH_balance, uniswap_fee)
locked = -ETH_delta
assert d_locked >= 0
# Make sure that no balance goes negative and then perform the swap if possible.
# The swaps can go negative if uniswap lacks liquidity
if locked <= ETH_balance:
cdps.at[index, "locked"] = cdps.at[index, "locked"] + locked
cdps.at[index, "drawn"] = cdps.at[index, "drawn"] + drawn
# update uniswap
uniswap_state_delta['ETH_delta'] -= locked
uniswap_state_delta['RAI_delta'] += drawn
ETH_balance -= locked
RAI_balance += drawn
return {"cdps": cdps, **uniswap_state_delta}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46.386667 | 128 | 0.610808 |
86b3b682945b9b7f83aa83501bff6a82dbe481ab | 13,770 | py | Python | enso/contrib/scriptotron/tracker.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 7 | 2015-09-19T20:57:32.000Z | 2020-12-31T16:34:42.000Z | enso/contrib/scriptotron/tracker.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 21 | 2015-11-03T23:15:25.000Z | 2018-10-11T21:57:45.000Z | enso/contrib/scriptotron/tracker.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 4 | 2015-09-15T17:18:00.000Z | 2021-06-16T07:06:06.000Z |
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso
#
# ----------------------------------------------------------------------------
__updated__ = "2018-07-12"
import logging
import os
import re
import time
import types
from os.path import basename
import enso.config
import enso.system
from enso.commands.manager import CommandAlreadyRegisteredError
from enso.contrib.scriptotron import (
adapters,
cmdretriever,
concurrency,
ensoapi,
)
from enso.contrib.scriptotron.events import EventResponderList
from enso.contrib.scriptotron.tracebacks import TracebackCommand, safetyNetted
from enso.messages import MessageManager, displayMessage as display_xml_message
from enso.platform import PlatformUnsupportedError
from enso.utils import do_once
# This may no longer be required (it was for backward compat)
SCRIPTS_FILE_NAME = os.path.expanduser("~/.ensocommands")
# IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
_SCRIPTS_FOLDER_NAME = enso.system.SPECIALFOLDER_ENSOCOMMANDS # @UndefinedVariable
# String to search for in the file to determine if it contains any command definitions
COMMAND_FILE_CHECK = re.compile(
r"^def %s[a-zA-Z0-9]|class [a-zA-Z0-9_]+\(CommandObject\):" % cmdretriever.SCRIPT_PREFIX,
re.MULTILINE)
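# For example, assuming cmdretriever.SCRIPT_PREFIX is "cmd_" (the real value
# lives in cmdretriever and is not shown here), lines like these would match:
#   def cmd_hello_world(ensoapi):
#   class OpenFileCommand(CommandObject):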
class ScriptCommandTracker(object):
def __init__(self, commandManager, eventManager):
self._cmdExprs = []
self._cmdMgr = commandManager
self._genMgr = concurrency.GeneratorManager(eventManager)
self._quasimodeStartEvents = EventResponderList(
eventManager,
"startQuasimode",
self._onQuasimodeStart
)
self._textModifiedEvents = EventResponderList(
eventManager,
"textModified",
self._onTextModified
)
@safetyNetted
def _callHandler(self, handler, *args, **kwargs):
assert logging.debug("calling handler %s", handler.__name__) or True
result = handler(*args, **kwargs)
if isinstance(result, types.GeneratorType):
self._genMgr.add(result)
def _onQuasimodeStart(self):
perf = []
for cmdName, handler in self._quasimodeStartEvents:
_ = cmdName
started = time.time()
self._callHandler(handler)
elapsed = time.time() - started
perf.append((handler, [], [], elapsed))
return perf
def _onTextModified(self, keyCode, oldText, newText, quasimodeId=0):
perf = []
for cmdName, handler in self._textModifiedEvents:
if not newText.startswith(cmdName + " "):
continue
oldText = oldText[len(cmdName) + 1:]
newText = newText[len(cmdName) + 1:]
started = time.time()
try:
self._callHandler(
handler, keyCode, oldText, newText, quasimodeId=quasimodeId)
except Exception:
logging.error(
"onTextModified handler is missing quasimodeId parameter: %s" % cmdName)
self._callHandler(handler, keyCode, oldText, newText)
elapsed = time.time() - started
perf.append((handler, [], [], elapsed))
return perf
def _registerCommand(self, cmdObj, cmdExpr):
try:
self._cmdMgr.registerCommand(cmdExpr, cmdObj)
self._cmdExprs.append(cmdExpr)
except CommandAlreadyRegisteredError:
logging.warn("Command already registered: %s" % cmdExpr)
def registerNewCommands(self, commandInfoList):
for info in commandInfoList:
if hasattr(info["func"], "on_quasimode_start"):
self._quasimodeStartEvents[info["cmdName"]] = info[
"func"].on_quasimode_start
if hasattr(info["func"], "on_text_modified"):
self._textModifiedEvents[info["cmdName"]] = info[
"func"].on_text_modified
cmd = adapters.makeCommandFromInfo(
info,
ensoapi.EnsoApi(),
self._genMgr
)
self._registerCommand(cmd, info["cmdExpr"])
def clearCommands(self, commandInfoList=None):
if commandInfoList:
for info in commandInfoList:
if hasattr(info["func"], "on_quasimode_start"):
del self._quasimodeStartEvents[info["cmdName"]]
if hasattr(info["func"], "on_text_modified"):
del self._textModifiedEvents[info["cmdName"]]
# Both below can fail and it should be tolerated
try:
self._cmdMgr.unregisterCommand(info["cmdExpr"])
except RuntimeError:
logging.warn("Error unregistering command '%s'" % info["cmdExpr"])
try:
del self._cmdExprs[self._cmdExprs.index(info["cmdExpr"])]
except Exception:
logging.warn("Error deleting command '%s'" % info["cmdExpr"])
# FIXME: remove generator from _genMgr
else:
for cmdExpr in self._cmdExprs:
try:
self._cmdMgr.unregisterCommand(cmdExpr)
except RuntimeError as e:
print e, cmdExpr
self._cmdExprs = []
self._quasimodeStartEvents[:] = []
self._genMgr.reset()
class ScriptTracker(object):
def __init__(self, eventManager, commandManager):
self._firstRun = True
self._scriptCmdTracker = ScriptCommandTracker(commandManager,
eventManager)
self._scriptFilename = SCRIPTS_FILE_NAME
self._scriptFolder = getScriptsFolderName()
self._lastMods = {}
self._registerDependencies()
self._commandsInFile = {}
# Call it now, otherwise there is a delay on first quasimode invocation
self._updateScripts()
eventManager.registerResponder(
self._updateScripts,
"startQuasimode"
)
commandManager.registerCommand(TracebackCommand.NAME,
TracebackCommand())
@classmethod
def install(cls, eventManager, commandManager):
cls(eventManager, commandManager)
@staticmethod
@safetyNetted
def _getGlobalsFromSourceCode(text, filename):
allGlobals = {}
code = compile(text + "\n", filename, "exec")
try:
exec code in allGlobals
except PlatformUnsupportedError as e:
logging.warning(
"Command '%s' is not supported on this platform%s"
% (
basename(filename),
" (%s)." % str(e) if str(e) else "."
)
)
return None
return allGlobals
def _getCommandFiles(self):
try:
# Get all *.py files, except those not valid for current platform, example:
# example.windows.py, example.linux.py, example.osx.py
commandFiles = [
os.path.join(self._scriptFolder, x)
for x in os.listdir(self._scriptFolder)
if x.endswith(".py")
]
except OSError:
commandFiles = []
return commandFiles
def _reloadPyScripts(self, files=None):
if files:
for file_name in files:
cmd_infos = self._commandsInFile.get(file_name, None)
if cmd_infos:
self._scriptCmdTracker.clearCommands(cmd_infos)
else:
self._scriptCmdTracker.clearCommands()
commandFiles = [self._scriptFilename]
if files:
commandFiles = commandFiles + files
else:
commandFiles = commandFiles + self._getCommandFiles()
assert logging.debug(commandFiles) or True
for file_name in commandFiles:
try:
with open(file_name, "r") as fd:
file_contents = fd.read().replace('\r\n', '\n') + "\n"
except IOError as e:
if file_name == SCRIPTS_FILE_NAME:
do_once(
logging.warning,
"Legacy script file %s not found" % SCRIPTS_FILE_NAME
)
else:
logging.error(e)
continue
except Exception as e:
logging.error(e)
continue
# Do not bother to parse files which does not contain command definitions
if not COMMAND_FILE_CHECK.search(file_contents):
logging.warning(
"Skipping file %s as it does not contain any command definitions",
file_name)
continue
allGlobals = self._getGlobalsFromSourceCode(
file_contents,
file_name
)
if allGlobals is not None:
infos = cmdretriever.getCommandsFromObjects(allGlobals)
self._scriptCmdTracker.registerNewCommands(infos)
self._registerDependencies(allGlobals)
self._commandsInFile[file_name] = infos
logging.info(
"Scriptotron registered commands from '%s': [%s]" %
(basename(file_name), ", ".join(info["cmdName"] for info in infos))
)
def _registerDependencies(self, allGlobals=None):
baseDeps = [self._scriptFilename] + self._getCommandFiles()
if allGlobals:
# Find any other files that the script may have executed
# via execfile().
extraDeps = [
obj.func_code.co_filename
for obj in allGlobals.values()
if ((hasattr(obj, "__module__")) and
(obj.__module__ is None) and
(hasattr(obj, "func_code")))
]
else:
extraDeps = []
self._fileDependencies = list(set(baseDeps + extraDeps))
def _updateScripts(self):
filesToReload = {}
for fileName in self._fileDependencies:
if os.path.isfile(fileName):
lastMod = os.path.getmtime(fileName)
if lastMod != self._lastMods.get(fileName, 0):
self._lastMods[fileName] = lastMod
filesToReload[fileName] = lastMod
for fileName in self._getCommandFiles():
if fileName not in self._fileDependencies:
self._fileDependencies.append(fileName)
self._lastMods[fileName] = os.path.getmtime(fileName)
filesToReload[fileName] = self._lastMods[fileName]
if filesToReload:
if not self._firstRun:
display_xml_message(
u"<p>Reloading commands, please wait...</p><caption>enso</caption>")
# TODO: This can be enabled after issues in clearCommands are
# solved...
self._reloadPyScripts(filesToReload.keys())
# self._reloadPyScripts()
if not self._firstRun:
# Force primary-message to disappear
MessageManager.get().finishPrimaryMessage()
# Display mini message with result
display_xml_message(
u"<p>Commands have been reloaded.</p><caption>enso</caption>",
primaryMsg=False, miniMsg=True, miniWaitTime=10)
if self._firstRun:
self._firstRun = False
def getScriptsFolderName():
if hasattr(enso.config, "SCRIPTS_FOLDER_NAME"):
if os.path.isdir(enso.config.SCRIPTS_FOLDER_NAME): # IGNORE:E1101
return enso.config.SCRIPTS_FOLDER_NAME # IGNORE:E1101
else:
raise Exception("enso.config.SCRIPTS_FOLDER_NAME is not valid folder: \"%s\""
% enso.config.SCRIPTS_FOLDER_NAME) # IGNORE:E1101
else:
if not os.path.isdir(_SCRIPTS_FOLDER_NAME):
os.makedirs(_SCRIPTS_FOLDER_NAME)
return _SCRIPTS_FOLDER_NAME
| 39.568966 | 93 | 0.594553 |
db6393ee5e487342e49c57389330b3e3dcf21faf | 7,614 | py | Python | workflow/scripts/rm_orphan_pe_bam.py | AnnaLorenc/ATACseq | 19d1e48da2022fa75837d400e5165050bd131787 | ["MIT"] | 19 | 2020-04-15T14:52:36.000Z | 2022-03-13T06:50:49.000Z | workflow/scripts/rm_orphan_pe_bam.py | AnnaLorenc/ATACseq | 19d1e48da2022fa75837d400e5165050bd131787 | ["MIT"] | 5 | 2020-05-06T18:10:56.000Z | 2022-02-03T23:43:19.000Z | workflow/scripts/rm_orphan_pe_bam.py | AnnaLorenc/ATACseq | 19d1e48da2022fa75837d400e5165050bd131787 | ["MIT"] | 16 | 2020-10-20T09:05:31.000Z | 2021-07-16T11:30:25.000Z |
#!/usr/bin/env python
# source: https://github.com/nf-core/chipseq/blob/master/bin/bampe_rm_orphan.py
# MIT License
#
# Copyright (c) Philip Ewels
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
#!# own changes and adjustments for snakemake-workflow chipseq are marked with "#!# AVI: " in comment
###############################################################################
###############################################################################
## Created on February 1st 2017 to remove singletons from paired-end BAM file
###############################################################################
###############################################################################
import errno  # used by makedir() below
import os
import pysam
import argparse
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
Description = 'Remove singleton reads from paired-end BAM file i.e if read1 is present in BAM file without read 2 and vice versa.'
Epilog = """Example usage: bampe_rm_orphan.py <BAM_INPUT_FILE> <BAM_OUTPUT_FILE>"""
argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('BAM_INPUT_FILE', help="Input BAM file sorted by name.")
argParser.add_argument('BAM_OUTPUT_FILE', help="Output BAM file sorted by name.")
## OPTIONAL PARAMETERS
argParser.add_argument('-fr', '--only_fr_pairs', dest="ONLY_FR_PAIRS", help="Only keeps pairs that are in FR orientation on same chromosome.",action='store_true')
args = argParser.parse_args()
############################################
############################################
## HELPER FUNCTIONS
############################################
############################################
def makedir(path):
if not len(path) == 0:
try:
#!# AVI: changed because of race conditions if directory exists, original code: os.makedirs(path)
os.makedirs(path, exist_ok=True)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
############################################
############################################
## MAIN FUNCTION
############################################
############################################
def bampe_rm_orphan(BAMIn,BAMOut,onlyFRPairs=False):
## SETUP DIRECTORY/FILE STRUCTURE
OutDir = os.path.dirname(BAMOut)
makedir(OutDir)
## COUNT VARIABLES
totalReads = 0; totalOutputPairs = 0; totalSingletons = 0; totalImproperPairs = 0
## ITERATE THROUGH BAM FILE
EOF = 0
SAMFin = pysam.AlignmentFile(BAMIn,"rb") #!# AVI: changed to new API from pysam.Samfile
SAMFout = pysam.AlignmentFile(BAMOut, "wb",header=SAMFin.header) #!# AVI: changed to new API from pysam.Samfile
currRead = next(SAMFin) #!# AVI: adapted for the use of the iterator, original code: currRead = SAMFin.next()
for read in SAMFin.fetch(until_eof=True): #!# AVI: added .fetch() to explicitly use new API
totalReads += 1
if currRead.qname == read.qname:
pair1 = currRead; pair2 = read
## FILTER FOR READS ON SAME CHROMOSOME IN FR ORIENTATION
if onlyFRPairs:
if pair1.tid == pair2.tid:
## READ1 FORWARD AND READ2 REVERSE STRAND
if not pair1.is_reverse and pair2.is_reverse:
if pair1.reference_start <= pair2.reference_start:
totalOutputPairs += 1
SAMFout.write(pair1)
SAMFout.write(pair2)
else:
totalImproperPairs += 1
## READ1 REVERSE AND READ2 FORWARD STRAND
elif pair1.is_reverse and not pair2.is_reverse:
if pair2.reference_start <= pair1.reference_start:
totalOutputPairs += 1
SAMFout.write(pair1)
SAMFout.write(pair2)
else:
totalImproperPairs += 1
else:
totalImproperPairs += 1
else:
totalImproperPairs += 1
else:
totalOutputPairs += 1
SAMFout.write(pair1)
SAMFout.write(pair2)
## RESET COUNTER
try:
totalReads += 1
currRead = next(SAMFin) #!# AVI: adapted for the use of the iterator, original code: currRead = SAMFin.next()
            except StopIteration:
EOF = 1
## READS WHERE ONLY ONE OF A PAIR IS IN FILE
else:
totalSingletons += 1
pair1 = currRead
currRead = read
if not EOF:
totalReads += 1
totalSingletons += 1
pair1 = currRead
## CLOSE ALL FILE HANDLES
SAMFin.close()
SAMFout.close()
LogFile = os.path.join(OutDir,'%s_bampe_rm_orphan.log' % (os.path.basename(BAMOut[:-4])))
SamLogFile = open(LogFile,'w')
SamLogFile.write('\n##############################\n')
SamLogFile.write('FILES/DIRECTORIES')
SamLogFile.write('\n##############################\n\n')
SamLogFile.write('Input File: ' + BAMIn + '\n')
SamLogFile.write('Output File: ' + BAMOut + '\n')
SamLogFile.write('\n##############################\n')
SamLogFile.write('OVERALL COUNTS')
SamLogFile.write('\n##############################\n\n')
SamLogFile.write('Total Input Reads = ' + str(totalReads) + '\n')
SamLogFile.write('Total Output Pairs = ' + str(totalOutputPairs) + '\n')
SamLogFile.write('Total Singletons Excluded = ' + str(totalSingletons) + '\n')
SamLogFile.write('Total Improper Pairs Excluded = ' + str(totalImproperPairs) + '\n')
SamLogFile.write('\n##############################\n')
SamLogFile.close()
############################################
############################################
## RUN FUNCTION
############################################
############################################
bampe_rm_orphan(BAMIn=args.BAM_INPUT_FILE,BAMOut=args.BAM_OUTPUT_FILE,onlyFRPairs=args.ONLY_FR_PAIRS)
############################################
############################################
############################################
############################################
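############################################
## USAGE SKETCH
############################################
# Illustrative only, not part of the original script. The input BAM must be
# sorted by read name; a hypothetical invocation could look like:
#
#     samtools sort -n in.bam -o in.name_sorted.bam
#     python bampe_rm_orphan.py in.name_sorted.bam out.bam -fr
#
# or, driving the filter from another Python module:
#
#     bampe_rm_orphan(BAMIn='in.name_sorted.bam', BAMOut='out.bam',
#                     onlyFRPairs=True)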
| 40.935484 | 162 | 0.51471 |
d107a261dfe9e54d46ef322437fe421038e40c9f
| 7,050 |
py
|
Python
|
moai/export/local/rendered_mesh.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 10 |
2021-04-02T11:21:33.000Z
|
2022-01-18T18:32:32.000Z
|
moai/export/local/rendered_mesh.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 1 |
2022-03-22T20:10:55.000Z
|
2022-03-24T13:11:02.000Z
|
moai/export/local/rendered_mesh.py
|
ai-in-motion/moai
|
e38cac046c059d2e2331ef4883bbabc5a500a5cf
|
[
"Apache-2.0"
] | 3 |
2021-05-16T20:47:40.000Z
|
2021-12-01T21:15:36.000Z
|
from moai.export.local.image2d import Image2d
from moai.monads.execution.cascade import _create_accessor
import torch
import pyrender
import typing
import logging
import numpy as np
import math
import trimesh
import itertools
from PIL import Image
log = logging.getLogger(__name__)
__all__ = ["RenderedMesh"]
class RenderedMesh(Image2d):
def __init__(self,
path: str,
vertices: typing.Union[str, typing.Sequence[str]],
faces: typing.Union[str, typing.Sequence[str]],
image: typing.Union[str, typing.Sequence[str]],
colormap: typing.Union[str, typing.Sequence[str]],
transform: typing.Union[str, typing.Sequence[str]],
translation: typing.Union[str, typing.Sequence[str]]=None,
rotation: typing.Union[str, typing.Sequence[str]]=None,
focal_length: typing.Union[float, typing.Tuple[float, float]]=5000.0,
extension: typing.Union[str, typing.Sequence[str]]=["png"], # jpg or png or exr
scale: float=1.0,
batch_percentage: float=1.0,
):
super(RenderedMesh, self).__init__(
path=path, image=image, extension=extension,
type=list(itertools.repeat('color', len([vertices] if isinstance(vertices, str) else vertices))),
transform=transform, batch_percentage=batch_percentage,
colormap=colormap,
)
self.focal_length = (float(focal_length), float(focal_length)) \
if isinstance(focal_length, float) or isinstance(focal_length, int) else focal_length
self.material = pyrender.MetallicRoughnessMaterial(
metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=(1.0, 1.0, 0.9, 1.0)
)
self.vertices = [vertices] if isinstance(vertices, str) else list(vertices)
self.vertices = [_create_accessor(k) for k in self.vertices]
self.faces = [faces] if isinstance(faces, str) else list(faces)
self.faces = [_create_accessor(k) for k in self.faces]
self.scene = pyrender.Scene(
bg_color=[0.0, 0.0, 0.0, 0.0],
ambient_light=(0.3, 0.3, 0.3)
)
for light in self._create_raymond_lights():
self.scene.add_node(light)
self.translation = list(itertools.repeat('', len(self.keys)) if translation is None else\
([translation] if isinstance(translation, str) else list(translation)))
self.rotation = list(itertools.repeat('', len(self.keys)) if rotation is None else\
([rotation] if isinstance(rotation, str) else list(rotation)))
self.scale = scale
self.renderer = None
def _get_renderer(self, width: int, height: int) -> pyrender.OffscreenRenderer:
if self.renderer is None or self.renderer.viewport_width != width\
or self.renderer.viewport_height != height:
self.renderer = pyrender.OffscreenRenderer(
viewport_width=width, viewport_height=height, point_size=1.0
)
return self.renderer
def __call__(self, tensors: typing.Dict[str, torch.Tensor]) -> None:
        for v, fac, r, t, k, _, tf, c, fmt in zip(  # 'fac'/'fmt' avoid rebinding 'f' twice
self.vertices, self.faces, self.rotation, self.translation,
self.keys, self.types, self.transforms, self.colormaps, self.formats
):
take = int(math.ceil(self.batch_percentage * tensors[k].shape[0]))
background = self.colorize_map[c](
self.transform_map[tf](tensors, k, take)
)
            b, _ch, h, w = background.shape  # '_ch' avoids clobbering the colormap key 'c'
renderer = self._get_renderer(width=w, height=h)
results = []
for i in range(b):
rotation = tensors[r][i].detach().cpu().numpy().squeeze() if r else np.eye(3)
translation = tensors[t][i].detach().cpu().numpy().squeeze() if t else np.zeros(3)
tmesh = trimesh.Trimesh(
v(tensors).detach().cpu().numpy().squeeze(),
                    fac(tensors).detach().cpu().numpy().squeeze(),
process=False
)
rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
tmesh.apply_transform(rot)
mesh = pyrender.Mesh.from_trimesh(tmesh, material=self.material)
node = self.scene.add(mesh, 'mesh')
# Equivalent to 180 degrees around the y-axis. Transforms the fit to
# OpenGL compatible coordinate system.
translation[0] *= -1.0
camera_pose = np.eye(4)
camera_pose[:3, :3] = rotation
camera_pose[:3, 3] = translation
camera = pyrender.camera.IntrinsicsCamera(
fx=self.focal_length[0], cx=w // 2,
fy=self.focal_length[1], cy=h // 2,
)
cam = self.scene.add(camera, pose=camera_pose)
color, _ = renderer.render(self.scene, flags=pyrender.RenderFlags.RGBA)
color = color.astype(np.float32) / 255.0
valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
input_img = background.detach().cpu().numpy().squeeze().transpose(1, 2, 0)
output_img = (color[:, :, :-1] * valid_mask + (1 - valid_mask) * input_img)
if self.scale != 1.0:
output_img = np.array(
Image.fromarray(
(output_img * 255.0).astype(np.uint8)
).resize(
(int(w * self.scale), int(h * self.scale)), Image.ANTIALIAS
)
)
results.append(output_img)
self.scene.remove_node(node)
self.scene.remove_node(cam)
self.save_map['color'](
np.stack(results).transpose(0, 3, 1, 2),
f"{k}_overlay", self.index, f
)
self.index = 0 if self.mode == "overwrite" else self.index + b
def _create_raymond_lights(self):
thetas = np.pi * np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])
phis = np.pi * np.array([0.0, 2.0 / 3.0, 4.0 / 3.0])
nodes = []
for phi, theta in zip(phis, thetas):
xp = np.sin(theta) * np.cos(phi)
yp = np.sin(theta) * np.sin(phi)
zp = np.cos(theta)
z = np.array([xp, yp, zp])
z = z / np.linalg.norm(z)
x = np.array([-z[1], z[0], 0.0])
if np.linalg.norm(x) == 0:
x = np.array([1.0, 0.0, 0.0])
x = x / np.linalg.norm(x)
y = np.cross(z, x)
matrix = np.eye(4)
matrix[:3,:3] = np.c_[x,y,z]
nodes.append(pyrender.Node(
light=pyrender.DirectionalLight(color=np.ones(3), intensity=1.0),
matrix=matrix
))
return nodes
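# Illustrative construction sketch (not from the original module): the tensor
# keys and the 'colormap'/'transform' identifiers below are placeholders --
# valid values come from the Image2d maps in the parent class.
#
#     exporter = RenderedMesh(
#         path='./renders',
#         vertices='mesh_vertices', faces='mesh_faces',
#         image='color_image', colormap='none', transform='none',
#         translation='camera_t', rotation='camera_R',
#     )
#     exporter(tensors)  # tensors: Dict[str, torch.Tensor] from the pipeline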
| 44.339623 | 109 | 0.54766 |
70763b25617a9ef03a6d9f3855387b41dd80a448
| 1,084 |
py
|
Python
|
rdpyg/draw/pyopengl/paths.py
|
illume/eyestabs
|
9ce717743a6a4fe7b561c68599e9352da3acf080
|
[
"Unlicense"
] | null | null | null |
rdpyg/draw/pyopengl/paths.py
|
illume/eyestabs
|
9ce717743a6a4fe7b561c68599e9352da3acf080
|
[
"Unlicense"
] | null | null | null |
rdpyg/draw/pyopengl/paths.py
|
illume/eyestabs
|
9ce717743a6a4fe7b561c68599e9352da3acf080
|
[
"Unlicense"
] | null | null | null |
from OpenGL.GL import glBegin, GL_LINES, glVertex3f, glEnd, glPushAttrib, GL_ALL_ATTRIB_BITS, glMatrixMode, GL_MODELVIEW, glLoadIdentity, glScale, glTranslatef, glPopAttrib
from OpenGL.GLUT import glutSolidTeapot
def draw_path(a_path):
""" draws a path with opengl lines.
"""
glBegin(GL_LINES)
try:
last = 0
for x,y,z in a_path.points:
if last:
glVertex3f(lx,ly,lz)
glVertex3f(x,y,z)
lx, ly, lz = x,y,z
else:
last = 1
lx, ly, lz = x,y,z
finally:
glEnd()
def draw_path_with_traveler(a_path):
""" draws a box where the position of the traveler is on the path.
"""
#TODO: speed this up. take out the scale as well.
draw_path(a_path)
# Get where the traveler is, and draw something there.
x,y,z = a_path.Where()
##glPushAttrib(GL_ALL_ATTRIB_BITS)
##glMatrixMode(GL_MODELVIEW)
##glLoadIdentity()
##glScale(0.2, 0.2, 0.2)
glTranslatef(x,y,z)
# now draw a teapot there.
glutSolidTeapot(0.1)
glTranslatef(-x,-y,-z)
##glPopAttrib()
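# Minimal sketch of the duck-typed path object these helpers expect
# (hypothetical stand-in; the real path class lives elsewhere in rdpyg):
#
#     class FakePath:
#         def __init__(self, points):
#             self.points = points          # iterable of (x, y, z) tuples
#         def Where(self):
#             return self.points[0]         # current traveler position
#
#     draw_path_with_traveler(FakePath([(0.0, 0.0, 0.0),
#                                       (1.0, 0.0, 0.0),
#                                       (1.0, 1.0, 0.0)]))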
| 15.485714 | 172 | 0.633764 |
7d5aeaf8c411ef1af39c896f2901406ab897e754
| 1,080 |
py
|
Python
|
services/migrations/0024_parler_field_type_change.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | null | null | null |
services/migrations/0024_parler_field_type_change.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | null | null | null |
services/migrations/0024_parler_field_type_change.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.11 on 2022-01-13 12:18
import django.db.models.deletion
import parler.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("services", "0023_change_ordering__noop"),
]
operations = [
migrations.AlterField(
model_name="alloweddatafieldtranslation",
name="master",
field=parler.fields.TranslationsForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="services.alloweddatafield",
),
),
migrations.AlterField(
model_name="servicetranslation",
name="master",
field=parler.fields.TranslationsForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="services.service",
),
),
]
| 28.421053 | 60 | 0.564815 |
7df746e3d44d5a3da77726b436147b566721958e
| 541 |
py
|
Python
|
backend/post/migrations/0012_auto_20191003_1840.py
|
VarunDev2000/CTF
|
1cc296cb18a3ec08ef198b47b3ac6980d66bef71
|
[
"bzip2-1.0.6"
] | null | null | null |
backend/post/migrations/0012_auto_20191003_1840.py
|
VarunDev2000/CTF
|
1cc296cb18a3ec08ef198b47b3ac6980d66bef71
|
[
"bzip2-1.0.6"
] | null | null | null |
backend/post/migrations/0012_auto_20191003_1840.py
|
VarunDev2000/CTF
|
1cc296cb18a3ec08ef198b47b3ac6980d66bef71
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0 on 2019-10-03 13:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0011_remove_posts_createdat'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
]
| 25.761905 | 143 | 0.667283 |
b64ceb4273cc06d4eefffd113b40c2f37aa3ca3e
| 3,260 |
py
|
Python
|
ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_commit.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 64 |
2021-11-15T03:48:00.000Z
|
2022-03-25T07:08:46.000Z
|
ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_commit.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 35 |
2021-11-23T04:14:35.000Z
|
2022-03-26T09:03:43.000Z
|
ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_commit.py
|
Aryalfrat/ymir
|
d4617ed00ef67a77ab4e1944763f608bface4be6
|
[
"Apache-2.0"
] | 57 |
2021-11-11T10:15:40.000Z
|
2022-03-29T07:27:54.000Z
|
import logging
import os
import shutil
import unittest
from unittest import mock
from google.protobuf.json_format import MessageToDict, ParseDict
import tests.utils as test_utils
from controller.utils.invoker_call import make_invoker_cmd_call
from controller.utils.invoker_mapping import RequestTypeToInvoker
from proto import backend_pb2
RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc'
class TestInvokerCommitBranch(unittest.TestCase):
def __init__(self, methodName: str) -> None:
# dir structure:
# test_involer_CLSNAME_sandbox_root
# ├── media_storage_root
# └── test_user
# └── ymir-dvc-test
super().__init__(methodName=methodName)
self._user_name = "user"
self._mir_repo_name = "repoid"
self._storage_name = "media_storage_root"
self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5'
self._commit_message = 't000aaaabbbbbbzzzzzzzzzzzzzzz4'
self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:])
self._user_root = os.path.join(self._sandbox_root, self._user_name)
self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name)
self._storage_root = os.path.join(self._sandbox_root, self._storage_name)
def setUp(self):
test_utils.check_commands()
self._prepare_dirs()
self._prepare_mir_repo()
logging.info("preparing done.")
    def tearDown(self):
        if os.path.isdir(self._sandbox_root):
            shutil.rmtree(self._sandbox_root)
# custom: env prepare
def _prepare_dirs(self):
if os.path.isdir(self._sandbox_root):
logging.info("sandbox root exists, remove it first")
shutil.rmtree(self._sandbox_root)
os.makedirs(self._sandbox_root)
os.mkdir(self._user_root)
os.mkdir(self._mir_repo_root)
os.mkdir(self._storage_root)
def _prepare_mir_repo(self):
# init repo
test_utils.mir_repo_init(self._mir_repo_root)
# prepare branch a
def _mock_run_func(*args, **kwargs):
ret = type('', (), {})()
ret.returncode = 0
ret.stdout = RET_ID
return ret
@mock.patch("subprocess.run", side_effect=_mock_run_func)
def test_invoker_00(self, mock_run):
response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_COMMIT],
sandbox_root=self._sandbox_root,
req_type=backend_pb2.CMD_COMMIT,
user_id=self._user_name,
repo_id=self._mir_repo_name,
task_id=self._task_id,
commit_message=self._commit_message)
print(MessageToDict(response))
expected_cmd = "mir commit --root {0} -m {1}".format(self._mir_repo_root, self._commit_message)
mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True)
expected_ret = backend_pb2.GeneralResp()
expected_dict = {'message': RET_ID}
ParseDict(expected_dict, expected_ret)
self.assertEqual(response, expected_ret)
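# Illustrative standalone entry point (assumed convenience; the repository
# normally drives this through its own test runner):
if __name__ == '__main__':
    unittest.main()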
| 37.906977 | 103 | 0.647239 |
7906810ca3e53172179e0508842ca7cdc2d85ad2
| 4,110 |
py
|
Python
|
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
"""
File name: utils
Author: rameshpr
Date: 11/5/18
"""
import numpy as np
from ctypes import *
from typing import List, Tuple
import cv2
from pyyolo.darknet import c_array, IMAGE, METADATA, predict_image, get_network_boxes, \
do_nms_obj, do_nms_sort, free_image, free_detections, ndarray_image
import pyyolo.darknet
from pyyolo.yolo_data import BBox, YoloData
def load_image(filename, flags=None):
# type: (str, int) -> IMAGE
"""
This will call cv2.imread() with the given arguments and convert
the resulting numpy array to a darknet image
:param filename: Image file name
:param flags: imread flags
:return: Given image file as a darknet image
:rtype: IMAGE
"""
image = cv2.imread(filename, flags)
return array_to_image(image)
def array_to_image(arr):
# type: (np.ndarray) -> IMAGE
"""
    Convert the given numpy array image to a darknet image.
    Remember to call the free_image(im) function after using this image.
:rtype: IMAGE
:param arr: numpy array
:return: darknet image
"""
data = arr.ctypes.data_as(POINTER(c_ubyte))
im = ndarray_image(data, arr.ctypes.shape, arr.ctypes.strides)
return im
def classify(net, meta, im):
    # type: (object, METADATA, IMAGE) -> List[Tuple[str, float]]
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, im, thresh=.2, hier_thresh=0, nms=.4):
# type: (object, METADATA, IMAGE, float, float, float) -> List[YoloData]
"""
    Detect the objects in the given image. free_image() is called inside this
    function, so the input darknet image is not usable afterwards.
    :param net: darknet network object (see load_net)
    :param meta: darknet metadata object (see load_meta / load_names)
    :param im: darknet IMAGE to run detection on
    :param thresh: detection confidence threshold
    :param hier_thresh: hierarchical detection threshold
    :param nms: non-maximum suppression threshold (0 disables NMS)
    :return: detections sorted by decreasing confidence
"""
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
res.append(YoloData(id=i, name=meta.names[i], bbox=BBox(b.x - b.w/2.0, b.y - b.h/2.0, b.w, b.h, dets[j].prob[i])))
res = sorted(res, key=lambda x: -x.bbox.c)
free_image(im)
free_detections(dets, num)
return res
def load_net(cfg_filepath, weights_filepath, clear):
# type: (str, str, bool) -> object
"""
:param cfg_filepath: cfg file name
:param weights_filepath: weights file name
:param clear: True if you want to clear the weights otherwise False
:return: darknet network object
"""
return pyyolo.darknet.load_net(cfg_filepath, weights_filepath, clear)
def load_meta(meta_filepath):
# type: (str) -> METADATA
"""
Recommend using load_names(str) function instead.
:param meta_filepath: metadata file path
:return: darknet metadata object
"""
return pyyolo.darknet.load_meta(meta_filepath)
def load_names(names_filepath):
# type: (str) -> METADATA
"""
Loading metadata from data file (eg: coco.data) is a mess as you need to edit that file also by pointing it to the names file.
Using this function you can directly load the names file as METADATA object.
Older function is still available if you need.
:param names_filepath: Filepath of the names file. Eg: coco.names
:return: darknet metadata object
"""
data = None
with open(names_filepath) as f:
data = f.readlines()
if data is None:
raise ValueError("Names file not found.. %s" % names_filepath)
n_cls = len(data)
p_names = (c_char_p * n_cls)()
for cls in range(n_cls):
        name = data[cls].rstrip('\n').encode('utf-8')  # strip only the newline, if any
        c_name = c_char_p()
        c_name.value = name
p_names[cls] = c_name
return METADATA(n_cls, cast(p_names, POINTER(c_char_p)))
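# Illustrative end-to-end sketch (file paths are placeholders; only functions
# defined in this module are used):
#
#     net = load_net('cfg/yolov3.cfg', 'yolov3.weights', clear=False)
#     meta = load_names('data/coco.names')
#     im = load_image('dog.jpg')            # converted to a darknet IMAGE
#     for det in detect(net, meta, im):     # detect() frees `im` internally
#         print(det.name, det.bbox.c)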
| 29.148936 | 130 | 0.652798 |
876f2a61c527dc5fea95afa33224ba11b553dd34
| 4,809 |
py
|
Python
|
qiskit_experiments/library/tomography/qst_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/tomography/qst_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/tomography/qst_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum State Tomography experiment
"""
from typing import Union, Optional, Iterable, List
from qiskit.circuit import QuantumCircuit, Instruction
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info import Statevector
from qiskit_experiments.framework import Options
from .tomography_experiment import TomographyExperiment
from .qst_analysis import StateTomographyAnalysis
from . import basis
class StateTomography(TomographyExperiment):
"""Quantum state tomography experiment.
# section: overview
Quantum state tomography (QST) is a method for experimentally
reconstructing the quantum state from measurement data.
    A QST experiment measures the state prepared by a quantum
    circuit in different measurement bases and post-processes the
measurement data to reconstruct the state.
# section: note
Performing full state tomography on an `N`-qubit state requires
running :math:`3^N` measurement circuits when using the default
measurement basis.
# section: see_also
qiskit_experiments.library.tomography.tomography_experiment.TomographyExperiment
"""
__analysis_class__ = StateTomographyAnalysis
@classmethod
def _default_analysis_options(cls) -> Options:
"""Default analysis options.
Analysis Options:
measurement_basis (:class`~basis.BaseFitterMeasurementBasis`): A custom
measurement basis for analysis. By default the :meth:`experiment_options`
measurement basis will be used.
fitter (``str`` or ``Callable``): The fitter function to use for reconstruction.
rescale_psd (``bool``): If True rescale the fitted state to be
positive-semidefinite (Default: True).
rescale_trace (``bool``): If True rescale the state returned by the fitter
                to have trace 1 (Default: True).
kwargs: Additional kwargs will be supplied to the fitter function.
"""
options = super()._default_analysis_options()
options.measurement_basis = basis.PauliMeasurementBasis().matrix
return options
def __init__(
self,
circuit: Union[QuantumCircuit, Instruction, BaseOperator, Statevector],
measurement_basis: basis.BaseTomographyMeasurementBasis = basis.PauliMeasurementBasis(),
measurement_qubits: Optional[Iterable[int]] = None,
basis_indices: Optional[Iterable[List[int]]] = None,
qubits: Optional[Iterable[int]] = None,
):
"""Initialize a quantum process tomography experiment.
Args:
            circuit: the quantum state circuit. If not a quantum circuit
it must be a class that can be appended to a quantum circuit.
measurement_basis: Tomography basis for measurements. If not specified the
default basis is the :class:`~basis.PauliMeasurementBasis`.
measurement_qubits: Optional, the qubits to be measured. These should refer
to the logical qubits in the state circuit. If None all qubits
in the state circuit will be measured.
basis_indices: Optional, a list of basis indices for generating partial
tomography measurement data. Each item should be given as a list of
measurement basis configurations ``[m[0], m[1], ...]`` where ``m[i]``
is the measurement basis index for qubit-i. If not specified full
tomography for all indices of the measurement basis will be performed.
qubits: Optional, the physical qubits for the initial state circuit.
"""
if isinstance(circuit, Statevector):
# Convert to circuit using initialize instruction
circ = QuantumCircuit(circuit.num_qubits)
circ.initialize(circuit)
circuit = circ
if basis_indices is not None:
# Add trivial preparation indices for base class
basis_indices = [([], i) for i in basis_indices]
super().__init__(
circuit,
measurement_basis=measurement_basis,
measurement_qubits=measurement_qubits,
basis_indices=basis_indices,
qubits=qubits,
)
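# Illustrative usage sketch (assumptions: ``backend`` is a Qiskit backend and
# the analysis result is stored under the name "state"; neither comes from
# this file):
#
#     from qiskit import QuantumCircuit
#
#     bell = QuantumCircuit(2)
#     bell.h(0)
#     bell.cx(0, 1)
#
#     qstexp = StateTomography(bell)
#     expdata = qstexp.run(backend)  # submits the 3^N measurement circuits
#     density_matrix = expdata.analysis_results("state").value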
| 42.557522 | 96 | 0.684342 |
bf2846be11fec909044eb0a5efa89e183d434bd5
| 11,503 |
py
|
Python
|
test/test_LensModel/test_numeric_lens_differentials.py
|
jiwoncpark/lenstronomy
|
c1d12580f8d8cf1d065d80568a58c0694e23945a
|
[
"MIT"
] | 1 |
2020-07-31T07:55:17.000Z
|
2020-07-31T07:55:17.000Z
|
test/test_LensModel/test_numeric_lens_differentials.py
|
jiwoncpark/lenstronomy
|
c1d12580f8d8cf1d065d80568a58c0694e23945a
|
[
"MIT"
] | null | null | null |
test/test_LensModel/test_numeric_lens_differentials.py
|
jiwoncpark/lenstronomy
|
c1d12580f8d8cf1d065d80568a58c0694e23945a
|
[
"MIT"
] | 2 |
2020-10-26T10:45:11.000Z
|
2021-03-04T12:25:19.000Z
|
__author__ = 'sibirrer'
import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.LensModel.lens_model import LensModel
class TestNumerics(object):
"""
    tests the numerical differentials of the lens model routines
"""
def setup(self):
self.lensModel = LensModel(['GAUSSIAN'])
self.kwargs = [{'amp': 1./4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.}]
def test_kappa(self):
x, y = 1., 1.
output = self.lensModel.kappa(x, y, self.kwargs)
output_num = self.lensModel.kappa(x, y, self.kwargs, diff=0.00001)
npt.assert_almost_equal(output_num, output, decimal=5)
def test_gamma(self):
x, y = 1., 2.
output1, output2 = self.lensModel.gamma(x, y, self.kwargs)
output1_num, output2_num = self.lensModel.gamma(x, y, self.kwargs, diff=0.00001)
npt.assert_almost_equal(output1_num, output1, decimal=5)
npt.assert_almost_equal(output2_num, output2, decimal=5)
def test_magnification(self):
x, y = 1., 1.
output = self.lensModel.magnification(x, y, self.kwargs)
output_num = self.lensModel.magnification(x, y, self.kwargs, diff=0.00001)
npt.assert_almost_equal(output_num, output, decimal=5)
def test_differentials(self):
x, y = 1., 1.
f_xx, f_xy, f_yx, f_yy = self.lensModel.hessian(x, y, self.kwargs)
f_xx_num, f_xy_num, f_yx_num, f_yy_num = self.lensModel.hessian(x, y, self.kwargs, diff=0.00001)
assert f_xy_num == f_yx_num
npt.assert_almost_equal(f_xx_num, f_xx, decimal=5)
npt.assert_almost_equal(f_xy_num, f_xy, decimal=5)
npt.assert_almost_equal(f_yx_num, f_yx, decimal=5)
npt.assert_almost_equal(f_yy_num, f_yy, decimal=5)
def test_flexion(self):
lensModel = LensModel(lens_model_list=['FLEXION'])
g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04
kwargs = [{'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4}]
f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1., y=1., kwargs=kwargs, diff=0.0001)
npt.assert_almost_equal(f_xxx, g1, decimal=2)
npt.assert_almost_equal(f_xxy, g2, decimal=2)
npt.assert_almost_equal(f_xyy, g3, decimal=2)
npt.assert_almost_equal(f_yyy, g4, decimal=2)
f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1., y=1., kwargs=kwargs, diff=0.0001, hessian_diff=0.001)
npt.assert_almost_equal(f_xxx, g1, decimal=2)
npt.assert_almost_equal(f_xxy, g2, decimal=2)
npt.assert_almost_equal(f_xyy, g3, decimal=2)
npt.assert_almost_equal(f_yyy, g4, decimal=2)
class TestNumericsProfile(object):
"""
tests the second derivatives of various lens models
"""
def setup(self):
pass
def assert_differentials(self, lens_model, kwargs, potential=True):
#lensModelNum = NumericLens(lens_model)
diff = 0.000001
#x, y = 1., 2.
x = np.linspace(start=0.1, stop=5.5, num=10)
y = np.zeros_like(x)
lensModel = LensModel(lens_model)
f_xx, f_xy, f_yx, f_yy = lensModel.hessian(x, y, [kwargs])
f_xx_num, f_xy_num, f_yx_num, f_yy_num = lensModel.hessian(x, y, [kwargs], diff=diff)
npt.assert_almost_equal(f_xx, f_xx_num, decimal=3)
npt.assert_almost_equal(f_yy, f_yy_num, decimal=3)
npt.assert_almost_equal(f_xy, f_xy_num, decimal=3)
if potential is True:
f_x, f_y = lensModel.alpha(x, y, [kwargs])
f_x_num, f_y_num = lensModel.alpha(x, y, [kwargs], diff=diff)
npt.assert_almost_equal(f_x, f_x_num, decimal=3)
npt.assert_almost_equal(f_y, f_y_num, decimal=3)
def test_gaussian(self):
lens_model = ['GAUSSIAN']
kwargs = {'amp': 1. / 4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.}
self.assert_differentials(lens_model, kwargs)
kwargs = {'amp': 1. / 4., 'sigma_x': 20., 'sigma_y': 20., 'center_x': 0., 'center_y': 0.}
self.assert_differentials(lens_model, kwargs)
kwargs = {'amp': 1. / 4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 2., 'center_y': 2.}
self.assert_differentials(lens_model, kwargs)
def test_gausian_kappa(self):
kwargs = {'amp': 1. / 4., 'sigma': 2., 'center_x': 0., 'center_y': 0.}
lens_model = ['GAUSSIAN_KAPPA']
self.assert_differentials(lens_model, kwargs)
def test_gausian_ellipse_kappa(self):
kwargs = {'amp': 1., 'sigma': 1., 'e1': 0.1, 'e2': -0.1, 'center_x':
0., 'center_y': 0.}
lens_model = ['GAUSSIAN_ELLIPSE_KAPPA']
self.assert_differentials(lens_model, kwargs)
def test_gausian_ellipse_potential(self):
kwargs = {'amp': 1., 'sigma': 2., 'e1': .1, 'e2': -0.1, 'center_x': 0., 'center_y': 0.}
lens_model = ['GAUSSIAN_ELLIPSE_POTENTIAL']
self.assert_differentials(lens_model, kwargs)
def test_external_shear(self):
kwargs = {'gamma1': 0.1, 'gamma2': -0.1}
lens_model = ['SHEAR']
self.assert_differentials(lens_model, kwargs)
def test_mass_sheet(self):
kwargs = {'kappa_ext': 0.1}
lens_model = ['CONVERGENCE']
self.assert_differentials(lens_model, kwargs)
def test_sis(self):
kwargs = {'theta_E': 0.5}
lens_model = ['SIS']
self.assert_differentials(lens_model, kwargs)
def test_flexion(self):
kwargs = {'g1': 0.01, 'g2': -0.01, 'g3': 0.001, 'g4': 0}
lens_model = ['FLEXION']
self.assert_differentials(lens_model, kwargs)
def test_nfw(self):
kwargs = {'alpha_Rs': .1, 'Rs': 5.}
lens_model = ['NFW']
self.assert_differentials(lens_model, kwargs)
def test_tnfw(self):
kwargs = {'alpha_Rs': .1, 'Rs': 5., 'r_trunc': 7}
lens_model = ['TNFW']
self.assert_differentials(lens_model, kwargs)
kwargs = {'Rs': 2., 'alpha_Rs': 1., 'r_trunc': 7}
lens_model = ['TNFW']
self.assert_differentials(lens_model, kwargs)
def test_nfw_ellipse(self):
kwargs = {'alpha_Rs': .1, 'Rs': 5., 'e1': 0.04, 'e2': -0.04}
lens_model = ['NFW_ELLIPSE']
self.assert_differentials(lens_model, kwargs)
def test_nfw_ellipse_gauss_dec(self):
kwargs = {'alpha_Rs': .1, 'Rs': 5., 'e1': 0.04, 'e2': -0.04}
lens_model = ['NFW_ELLIPSE_GAUSS_DEC']
self.assert_differentials(lens_model, kwargs)
def test_ctnfw_gauss_dec(self):
kwargs = {'rho_s': 5, 'r_s': 5., 'r_trunc': 10., 'r_core': 0.3, 'a': 2}
lens_model = ['CTNFW_GAUSS_DEC']
self.assert_differentials(lens_model, kwargs)
def test_point_mass(self):
kwargs = {'theta_E': 2.}
lens_model = ['POINT_MASS']
self.assert_differentials(lens_model, kwargs)
def test_sersic(self):
kwargs = {'n_sersic': .5, 'R_sersic': 1.5, 'k_eff': 0.3}
lens_model = ['SERSIC']
self.assert_differentials(lens_model, kwargs)
def test_sersic_ellipse_gauss_dec(self):
kwargs = {'n_sersic': 1., 'R_sersic': 2., 'k_eff': 1., 'e1': 0.04,
'e2': 0.}
lens_model = ['SERSIC_ELLIPSE_GAUSS_DEC']
self.assert_differentials(lens_model, kwargs)
def test_sersic_ellipse_pot(self):
kwargs = {'n_sersic': 2., 'R_sersic': 0.5, 'k_eff': 0.3, 'e1': 0.04, 'e2': -0.0}
lens_model = ['SERSIC_ELLIPSE_POTENTIAL']
self.assert_differentials(lens_model, kwargs)
def test_shapelets_pot_2(self):
kwargs = {'coeffs': [0, 1, 2, 3, 4, 5], 'beta': 0.3}
lens_model = ['SHAPELETS_CART']
self.assert_differentials(lens_model, kwargs)
def test_sis_truncate(self):
kwargs = {'theta_E': 0.5, 'r_trunc': 2.}
lens_model = ['SIS_TRUNCATED']
self.assert_differentials(lens_model, kwargs)
def test_spep(self):
kwargs = {'theta_E': 0.5, 'gamma': 1.9, 'e1': 0.04, 'e2': -0.1}
lens_model = ['SPEP']
self.assert_differentials(lens_model, kwargs)
def test_spp(self):
kwargs = {'theta_E': 0.5, 'gamma': 1.9}
lens_model = ['SPP']
self.assert_differentials(lens_model, kwargs)
def test_PJaffe(self):
kwargs = {'sigma0': 1., 'Ra': 0.2, 'Rs': 2.}
lens_model = ['PJAFFE']
self.assert_differentials(lens_model, kwargs)
def test_PJaffe_ellipse(self):
kwargs = {'sigma0': 1., 'Ra': 0.2, 'Rs': 2., 'e1': 0.04, 'e2': -0.0}
lens_model = ['PJAFFE_ELLIPSE']
self.assert_differentials(lens_model, kwargs)
def test_Hernquist(self):
kwargs = {'sigma0': 1., 'Rs': 1.5}
lens_model = ['HERNQUIST']
self.assert_differentials(lens_model, kwargs)
def test_Hernquist_ellipse(self):
kwargs = {'sigma0': 1., 'Rs': 1.5, 'e1': 0.04, 'e2': -0.0}
lens_model = ['HERNQUIST_ELLIPSE']
self.assert_differentials(lens_model, kwargs)
def test_NIE(self):
kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0., 's_scale': 0.04}
lens_model = ['NIE']
self.assert_differentials(lens_model, kwargs)
def test_NIE_simple(self):
kwargs = {'b': 2., 'q': 0.3, 's': 0.04}
lens_model = ['NIE_SIMPLE']
self.assert_differentials(lens_model, kwargs)
def test_EPL(self):
kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0., 't': 1.23}
lens_model = ['EPL']
self.assert_differentials(lens_model, kwargs)
def test_coreBurk(self):
kwargs={'Rs':2, 'alpha_Rs': 1, 'r_core':0.4}
lens_model = ['coreBURKERT']
self.assert_differentials(lens_model, kwargs)
kwargs = {'Rs': 2, 'alpha_Rs': 1, 'r_core':5}
self.assert_differentials(lens_model, kwargs)
def test_cnfw(self):
kwargs={'Rs': 15.5, 'alpha_Rs': 1., 'r_core': 8.}
lens_model = ['CNFW']
self.assert_differentials(lens_model, kwargs, potential=True)
def test_cnfw_ellipse(self):
kwargs = {'alpha_Rs': .1, 'Rs': 5., 'r_core': 0.1, 'e1': 0.04, 'e2': -0.04}
lens_model = ['CNFW_ELLIPSE']
self.assert_differentials(lens_model, kwargs, potential=True)
def test_cored_density(self):
kwargs = {'sigma0': 0.1, 'r_core': 8}
lens_model = ['CORED_DENSITY']
self.assert_differentials(lens_model, kwargs)
def test_cored_density_2(self):
kwargs = {'sigma0': 0.1, 'r_core': 8}
lens_model = ['CORED_DENSITY_2']
self.assert_differentials(lens_model, kwargs)
def test_cored_density_mst(self):
kwargs = {'lambda_approx': 1.1, 'r_core': 8}
lens_model = ['CORED_DENSITY_MST']
self.assert_differentials(lens_model, kwargs)
def test_cored_density_2_mst(self):
kwargs = {'lambda_approx': 1.1, 'r_core': 8}
lens_model = ['CORED_DENSITY_2_MST']
self.assert_differentials(lens_model, kwargs)
def test_const_mag_positive(self):
kwargs = {'mu_r': 1, 'mu_t': 10, 'parity': 1, 'phi_G': 0.1}
lens_model = ['CONST_MAG']
self.assert_differentials(lens_model, kwargs)
def test_const_mag_negative(self):
kwargs = {'mu_r': 1, 'mu_t': 10, 'parity': -1, 'phi_G': 0.1}
lens_model = ['CONST_MAG']
self.assert_differentials(lens_model, kwargs)
def test_nie_potential(self):
kwargs = {'theta_E':2. , 'theta_c':1. , 'e1': 0.1, 'e2': 0.1}
lens_model = ['NIE_POTENTIAL']
self.assert_differentials(lens_model, kwargs)
if __name__ == '__main__':
    pytest.main(["-k", "TestNumerics"])  # matches both test classes above
| 38.73064 | 114 | 0.609754 |
5d4897c51786b74da66187d2524d9e3fe0098af1
| 20,223 |
py
|
Python
|
skfda/misc/operators/_linear_differential_operator.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | 1 |
2020-06-27T22:25:49.000Z
|
2020-06-27T22:25:49.000Z
|
skfda/misc/operators/_linear_differential_operator.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
skfda/misc/operators/_linear_differential_operator.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
import numbers
from numpy import polyder, polyint, polymul, polyval
import scipy.integrate
from scipy.interpolate import PPoly
import numpy as np
from ..._utils import _same_domain
from ...representation import FDataGrid
from ...representation.basis import Constant, Monomial, Fourier, BSpline
from ._operators import Operator, gramian_matrix_optimization
__author__ = "Pablo Pérez Manso"
__email__ = "[email protected]"
class LinearDifferentialOperator(Operator):
"""Defines the structure of a linear differential operator function system
.. math::
        Lx(t) = b_0(t) x(t) + b_1(t) x'(t) +
\\dots + b_{n-1}(t) d^{n-1}(x(t)) + b_n(t) d^n(x(t))
Can only be applied to functional data, as multivariate data has no
derivatives.
Attributes:
weights (list): A list of callables.
Examples:
Create a linear differential operator that penalizes the second
derivative (acceleration)
>>> from skfda.misc.operators import LinearDifferentialOperator
>>> from skfda.representation.basis import (FDataBasis,
... Monomial, Constant)
>>>
>>> LinearDifferentialOperator(2)
LinearDifferentialOperator(
weights=[
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[0]],
...),
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[0]],
...),
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[1]],
...)]
)
Create a linear differential operator that penalizes three times
the second derivative (acceleration) and twice the first (velocity).
>>> LinearDifferentialOperator(weights=[0, 2, 3])
LinearDifferentialOperator(
weights=[
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[0]],
...),
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[2]],
...),
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[3]],
...)]
)
Create a linear differential operator with non-constant weights.
>>> constant = Constant()
>>> monomial = Monomial((0, 1), n_basis=3)
>>> fdlist = [FDataBasis(constant, [0]),
... FDataBasis(constant, [0]),
... FDataBasis(monomial, [1, 2, 3])]
>>> LinearDifferentialOperator(weights=fdlist)
LinearDifferentialOperator(
weights=[
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[0]],
...),
FDataBasis(
basis=Constant(domain_range=[array([0, 1])], n_basis=1),
coefficients=[[0]],
...),
FDataBasis(
basis=Monomial(domain_range=[array([0, 1])], n_basis=3),
coefficients=[[1 2 3]],
...)]
)
"""
def __init__(
self, order_or_weights=None, *, order=None, weights=None,
domain_range=None):
"""Constructor. You have to provide either order or weights.
If both are provided, it will raise an error.
If a positional argument is supplied it will be considered the
order if it is an integral type and the weights otherwise.
Args:
order (int, optional): the order of the operator. It's the highest
derivative order of the operator
weights (list, optional): A FDataBasis objects list of length
order + 1 items
domain_range (tuple or list of tuples, optional): Definition
of the interval where the weight functions are
defined. If the functional weights are specified
and this is not, takes the domain range from them.
Otherwise, defaults to (0,1).
"""
from ...representation.basis import FDataBasis
num_args = sum(
[a is not None for a in [order_or_weights, order, weights]])
if num_args > 1:
raise ValueError("You have to provide the order or the weights, "
"not both")
real_domain_range = (domain_range if domain_range is not None
else (0, 1))
if order_or_weights is not None:
if isinstance(order_or_weights, numbers.Integral):
order = order_or_weights
else:
weights = order_or_weights
if order is None and weights is None:
self.weights = (FDataBasis(Constant(real_domain_range), 0),)
elif weights is None:
if order < 0:
raise ValueError("Order should be an non-negative integer")
self.weights = [
FDataBasis(Constant(real_domain_range),
0 if (i < order) else 1)
for i in range(order + 1)]
else:
if len(weights) == 0:
raise ValueError("You have to provide one weight at least")
if all(isinstance(n, numbers.Real) for n in weights):
self.weights = (FDataBasis(Constant(real_domain_range),
np.array(weights)
.reshape(-1, 1)).to_list())
elif all(isinstance(n, FDataBasis) for n in weights):
if all([_same_domain(weights[0], x)
and x.n_samples == 1 for x in weights]):
self.weights = weights
real_domain_range = weights[0].domain_range
if (domain_range is not None
and real_domain_range != domain_range):
raise ValueError("The domain range provided for the "
"linear operator does not match the "
"domain range of the weights")
else:
raise ValueError("FDataBasis objects in the list have "
"not the same domain_range")
else:
raise ValueError("The elements of the list are neither "
"integers or FDataBasis objects")
self.domain_range = real_domain_range
def __repr__(self):
"""Representation of linear differential operator object."""
bwtliststr = ""
for w in self.weights:
bwtliststr = bwtliststr + "\n" + repr(w) + ","
return (f"{self.__class__.__name__}("
f"\nweights=[{bwtliststr[:-1]}]"
f"\n)").replace('\n', '\n ')
def __eq__(self, other):
"""Equality of linear differential operator objects"""
return (self.weights == other.weights)
def constant_weights(self):
"""
Return the scalar weights of the linear differential operator if they
        are all expressed in a constant basis.
Otherwise, return None.
This function is mostly useful for basis which want to override
the _penalty method in order to use an analytical expression
for constant weights.
"""
coefs = [w.coefficients[0, 0] if isinstance(w.basis, Constant)
else None
for w in self.weights]
return np.array(coefs) if coefs.count(None) == 0 else None
def __call__(self, f):
"""Return the function that results of applying the operator."""
function_derivatives = [
f.derivative(order=i) for i, _ in enumerate(self.weights)]
def applied_linear_diff_op(t):
return sum(w(t) * function_derivatives[i](t)
for i, w in enumerate(self.weights))
return applied_linear_diff_op
#############################################################
#
# Optimized implementations of gramian matrix for each basis.
#
#############################################################
@gramian_matrix_optimization.register
def constant_penalty_matrix_optimized(
linear_operator: LinearDifferentialOperator,
basis: Constant):
coefs = linear_operator.constant_weights()
if coefs is None:
return NotImplemented
return np.array([[coefs[0] ** 2 *
(basis.domain_range[0][1] -
basis.domain_range[0][0])]])
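# Minimal numeric check of the closed form above (illustrative only):
#
#     op = LinearDifferentialOperator(weights=[3])  # L x = 3 x
#     constant_penalty_matrix_optimized(op, Constant(domain_range=(0, 1)))
#     # -> array([[ 9.]])  since 3**2 * (1 - 0) == 9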
def _monomial_evaluate_constant_linear_diff_op(basis, weights):
"""
Evaluate constant weights of a linear differential operator
over the basis functions.
"""
max_derivative = len(weights) - 1
seq = np.arange(basis.n_basis)
coef_mat = np.linspace(seq, seq - max_derivative + 1,
max_derivative, dtype=int)
# Compute coefficients for each derivative
coefs = np.cumprod(coef_mat, axis=0)
# Add derivative 0 row
coefs = np.concatenate((np.ones((1, basis.n_basis)), coefs))
# Now each row correspond to each basis and each column to
# each derivative
coefs_t = coefs.T
# Multiply by the weights
weighted_coefs = coefs_t * weights
assert len(weighted_coefs) == basis.n_basis
# Now each row has the right weight, but the polynomials are in a
# decreasing order and with different exponents
# Resize the coefs so that there are as many rows as the number of
# basis
# The matrix is now triangular
# refcheck is False to prevent exceptions while debugging
weighted_coefs = np.copy(weighted_coefs.T)
weighted_coefs.resize(basis.n_basis,
basis.n_basis, refcheck=False)
weighted_coefs = weighted_coefs.T
# Shift the coefficients so that they correspond to the right
# exponent
indexes = np.tril_indices(basis.n_basis)
polynomials = np.zeros_like(weighted_coefs)
polynomials[indexes[0], indexes[1] -
indexes[0] - 1] = weighted_coefs[indexes]
# At this point, each row of the matrix correspond to a polynomial
# that is the result of applying the linear differential operator
# to each element of the basis
return polynomials
@gramian_matrix_optimization.register
def monomial_penalty_matrix_optimized(
linear_operator: LinearDifferentialOperator,
basis: Monomial):
weights = linear_operator.constant_weights()
if weights is None:
return NotImplemented
polynomials = _monomial_evaluate_constant_linear_diff_op(basis, weights)
    # Pad the polynomials with zeros so that the product fits:
    # the product has twice the degree of the factors
length_with_padding = polynomials.shape[1] * 2 - 1
# Multiplication of polynomials is a convolution.
    # The convolution can be performed in parallel by applying a Fourier
    # transform, multiplying pointwise in that space, and converting
    # back with the inverse Fourier transform
fft = np.fft.rfft(polynomials, length_with_padding)
# We compute only the upper matrix, as the penalty matrix is
# symmetrical
indices = np.triu_indices(basis.n_basis)
fft_mul = fft[indices[0]] * fft[indices[1]]
integrand = np.fft.irfft(fft_mul, length_with_padding)
integration_domain = basis.domain_range[0]
# To integrate, divide by the position and increase the exponent
# in the evaluation
denom = np.arange(integrand.shape[1], 0, -1)
integrand /= denom
# Add column of zeros at the right to increase exponent
integrand = np.pad(integrand,
pad_width=((0, 0),
(0, 1)),
mode='constant')
# Now, apply Barrow's rule
# polyval applies Horner method over the first dimension,
# so we need to transpose
x_right = np.polyval(integrand.T, integration_domain[1])
x_left = np.polyval(integrand.T, integration_domain[0])
integral = x_right - x_left
penalty_matrix = np.empty((basis.n_basis, basis.n_basis))
# Set upper matrix
penalty_matrix[indices] = integral
# Set lower matrix
penalty_matrix[(indices[1], indices[0])] = integral
return penalty_matrix
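# Quick standalone sanity check of the FFT-based polynomial product used
# above (illustrative only):
#
#     p1 = np.array([1., 2.])  # 1 + 2x
#     p2 = np.array([3., 4.])  # 3 + 4x
#     n = len(p1) + len(p2) - 1
#     np.fft.irfft(np.fft.rfft(p1, n) * np.fft.rfft(p2, n), n)
#     # -> array([ 3., 10.,  8.])  i.e. 3 + 10x + 8x^2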
def _fourier_penalty_matrix_optimized_orthonormal(basis, weights):
"""
Return the penalty when the basis is orthonormal.
"""
signs = np.array([1, 1, -1, -1])
signs_expanded = np.tile(signs, len(weights) // 4 + 1)
signs_odd = signs_expanded[:len(weights)]
signs_even = signs_expanded[1:len(weights) + 1]
phases = (np.arange(1, (basis.n_basis - 1) // 2 + 1) *
2 * np.pi / basis.period)
# Compute increasing powers
coefs_no_sign = np.vander(phases, len(weights), increasing=True)
coefs_no_sign *= weights
coefs_odd = signs_odd * coefs_no_sign
coefs_even = signs_even * coefs_no_sign
# After applying the linear differential operator to a sinusoidal
# element of the basis e, the result can be expressed as
# A e + B e*, where e* is the other basis element in the pair
# with the same phase
odd_sin_coefs = np.sum(coefs_odd[:, ::2], axis=1)
odd_cos_coefs = np.sum(coefs_odd[:, 1::2], axis=1)
even_cos_coefs = np.sum(coefs_even[:, ::2], axis=1)
even_sin_coefs = np.sum(coefs_even[:, 1::2], axis=1)
# The diagonal is the inner product of A e + B e*
# with itself. As the basis is orthonormal, the cross products e e*
# are 0, and the products e e and e* e* are one.
# Thus, the diagonal is A^2 + B^2
# All elements outside the main diagonal are 0
main_diag_odd = odd_sin_coefs**2 + odd_cos_coefs**2
main_diag_even = even_sin_coefs**2 + even_cos_coefs**2
# The main diagonal should intercalate both diagonals
main_diag = np.array((main_diag_odd, main_diag_even)).T.ravel()
penalty_matrix = np.diag(main_diag)
# Add row and column for the constant
penalty_matrix = np.pad(penalty_matrix, pad_width=((1, 0), (1, 0)),
mode='constant')
penalty_matrix[0, 0] = weights[0]**2
return penalty_matrix
@gramian_matrix_optimization.register
def fourier_penalty_matrix_optimized(
linear_operator: LinearDifferentialOperator,
basis: Fourier):
weights = linear_operator.constant_weights()
if weights is None:
return NotImplemented
# If the period and domain range are not the same, the basis functions
# are not orthogonal
if basis.period != (basis.domain_range[0][1] - basis.domain_range[0][0]):
return NotImplemented
return _fourier_penalty_matrix_optimized_orthonormal(basis, weights)
@gramian_matrix_optimization.register
def bspline_penalty_matrix_optimized(
linear_operator: LinearDifferentialOperator,
basis: BSpline):
coefs = linear_operator.constant_weights()
if coefs is None:
return NotImplemented
nonzero = np.flatnonzero(coefs)
# All derivatives above the order of the spline are effectively
# zero
nonzero = nonzero[nonzero < basis.order]
if len(nonzero) == 0:
return np.zeros((basis.n_basis, basis.n_basis))
# We will only deal with one nonzero coefficient right now
if len(nonzero) != 1:
return NotImplemented
derivative_degree = nonzero[0]
if derivative_degree == basis.order - 1:
# The derivative of the bsplines are constant in the intervals
# defined between knots
knots = np.array(basis.knots)
mid_inter = (knots[1:] + knots[:-1]) / 2
basis_deriv = basis.derivative(order=derivative_degree)
constants = basis_deriv(mid_inter)[..., 0].T
knots_intervals = np.diff(basis.knots)
# Integration of product of constants
return constants.T @ np.diag(knots_intervals) @ constants
# We only deal with the case without zero length intervals
# for now
if np.any(np.diff(basis.knots) == 0):
return NotImplemented
# Compute exactly using the piecewise polynomial
# representation of splines
# Places m knots at the boundaries
knots = basis._evaluation_knots()
    # c is used to select which spline the function
# PPoly.from_spline below computes
c = np.zeros(len(knots))
# Initialise empty list to store the piecewise polynomials
ppoly_lst = []
no_0_intervals = np.where(np.diff(knots) > 0)[0]
# For each basis gets its piecewise polynomial representation
for i in range(basis.n_basis):
# Write a 1 in c in the position of the spline
# transformed in each iteration
c[i] = 1
# Gets the piecewise polynomial representation and gets
# only the positions for no zero length intervals
        # These polynomials are defined relative to the knots,
        # meaning that column i corresponds to the i-th knot.
        # Let the i-th knot be a;
        # then f(x) = pp(x - a)
pp = PPoly.from_spline((knots, c, basis.order - 1))
pp_coefs = pp.c[:, no_0_intervals]
# We have the coefficients for each interval in coordinates
# (x - a), so we will need to subtract a when computing the
# definite integral
ppoly_lst.append(pp_coefs)
c[i] = 0
# Now for each pair of basis computes the inner product after
# applying the linear differential operator
penalty_matrix = np.zeros((basis.n_basis, basis.n_basis))
for interval in range(len(no_0_intervals)):
for i in range(basis.n_basis):
poly_i = np.trim_zeros(ppoly_lst[i][:,
interval], 'f')
if len(poly_i) <= derivative_degree:
                # if the degree of the polynomial is less than or
                # equal to the derivative order, the result of the
                # integral will be 0
continue
# indefinite integral
derivative = polyder(poly_i, derivative_degree)
square = polymul(derivative, derivative)
integral = polyint(square)
# definite integral
penalty_matrix[i, i] += np.diff(polyval(
integral, basis.knots[interval: interval + 2]
- basis.knots[interval]))[0]
for j in range(i + 1, basis.n_basis):
poly_j = np.trim_zeros(ppoly_lst[j][:,
interval], 'f')
if len(poly_j) <= derivative_degree:
                    # if the degree of the polynomial is less than
                    # or equal to the derivative order, the result of
                    # the integral will be 0
continue
# indefinite integral
integral = polyint(
polymul(polyder(poly_i, derivative_degree),
polyder(poly_j, derivative_degree)))
# definite integral
penalty_matrix[i, j] += np.diff(polyval(
integral, basis.knots[interval: interval + 2]
- basis.knots[interval])
)[0]
penalty_matrix[j, i] = penalty_matrix[i, j]
return penalty_matrix
@gramian_matrix_optimization.register
def fdatagrid_penalty_matrix_optimized(
linear_operator: LinearDifferentialOperator,
basis: FDataGrid):
evaluated_basis = sum(
w(basis.sample_points[0]) *
basis.derivative(order=i)(basis.sample_points[0])
for i, w in enumerate(linear_operator.weights))
indices = np.triu_indices(basis.n_samples)
product = evaluated_basis[indices[0]] * evaluated_basis[indices[1]]
triang_vec = scipy.integrate.simps(product[..., 0], x=basis.sample_points)
matrix = np.empty((basis.n_samples, basis.n_samples))
# Set upper matrix
matrix[indices] = triang_vec
# Set lower matrix
matrix[(indices[1], indices[0])] = triang_vec
return matrix
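# Illustrative usage sketch (assumes the companion ``gramian_matrix``
# dispatcher defined next to ``gramian_matrix_optimization`` in
# ``._operators``):
#
#     from skfda.misc.operators._operators import gramian_matrix
#
#     bspline = BSpline(domain_range=(0, 1), n_basis=8, order=4)
#     penalty = gramian_matrix(LinearDifferentialOperator(2), bspline)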
| 34.687822 | 78 | 0.604806 |
31b38d30a709b9a6e24bb1c073a418c48f795b90
| 557 |
py
|
Python
|
src/activelyDeletingState.py
|
bobjects/dharmacorder
|
082144c3e70a9ae5f788ad4130d79599c2400dfa
|
[
"MIT"
] | null | null | null |
src/activelyDeletingState.py
|
bobjects/dharmacorder
|
082144c3e70a9ae5f788ad4130d79599c2400dfa
|
[
"MIT"
] | null | null | null |
src/activelyDeletingState.py
|
bobjects/dharmacorder
|
082144c3e70a9ae5f788ad4130d79599c2400dfa
|
[
"MIT"
] | null | null | null |
from deletingState import DeletingState
# from selectingFileDeletingState import SelectingFileDeletingState
import waitingDeletingState
class ActivelyDeletingState(DeletingState):
def enter_state(self):
super(ActivelyDeletingState, self).enter_state()
session = self._state_machine.session_to_delete
if session:
self.dharmacorder._recording_collection.delete_recording_session(session)
self.dharmacorder.update_display()
self.transition_to_state_class(waitingDeletingState.WaitingDeletingState)
| 39.785714 | 85 | 0.789946 |
9d5b7576921f10bf4d1fe4b109a8c7b3f9c7c28f
| 259 |
py
|
Python
|
ipc/PoemTokenizer.py
|
farshad-nejati/icp
|
e0f8db33fba23382782a0ac3f26ab6dd3b104553
|
[
"MIT"
] | 1 |
2020-05-17T21:16:34.000Z
|
2020-05-17T21:16:34.000Z
|
ipc/PoemTokenizer.py
|
farshad-nejati/icp
|
e0f8db33fba23382782a0ac3f26ab6dd3b104553
|
[
"MIT"
] | null | null | null |
ipc/PoemTokenizer.py
|
farshad-nejati/icp
|
e0f8db33fba23382782a0ac3f26ab6dd3b104553
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from hazm import word_tokenize
from .IOManager import *
class PoemTokenizer:
@staticmethod
def tokenize_poem(poem):
poem_content = IOManager.read_file(poem)
return word_tokenize(poem_content)
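# Illustrative usage (the file path is a placeholder):
#
#     tokens = PoemTokenizer.tokenize_poem('poems/sample_poem.txt')
#     print(tokens[:10])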
| 21.583333 | 48 | 0.760618 |
d7299b09bf9d7001bb7936d66f431e82f22b3085
| 40,732 |
py
|
Python
|
src/sage/matrix/matrix_integer_dense_hnf.py
|
mkoeppe/sage-1
|
249fc903897809e1eb081fbacb94741e01b37e73
|
[
"BSL-1.0"
] | 1 |
2020-08-30T04:27:27.000Z
|
2020-08-30T04:27:27.000Z
|
src/sage/matrix/matrix_integer_dense_hnf.py
|
mkoeppe/sage-1
|
249fc903897809e1eb081fbacb94741e01b37e73
|
[
"BSL-1.0"
] | null | null | null |
src/sage/matrix/matrix_integer_dense_hnf.py
|
mkoeppe/sage-1
|
249fc903897809e1eb081fbacb94741e01b37e73
|
[
"BSL-1.0"
] | 1 |
2020-07-23T10:40:14.000Z
|
2020-07-23T10:40:14.000Z
|
"""
Modular algorithm to compute Hermite normal forms of integer matrices
AUTHORS:
- Clement Pernet and William Stein (2008-02-07): initial version
"""
from __future__ import print_function
from six.moves import range
from copy import copy
from sage.misc.misc import verbose, cputime
from sage.matrix.constructor import (random_matrix, matrix, identity_matrix)
from sage.rings.all import ZZ, Integer, RR
from sage.arith.all import previous_prime, CRT_list
def max_det_prime(n):
"""
Return the largest prime so that it is reasonably efficient to
compute modulo that prime with n x n matrices in LinBox.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
a prime number
EXAMPLES::
sage: from sage.matrix.matrix_integer_dense_hnf import max_det_prime
sage: max_det_prime(10000)
8388593
sage: max_det_prime(1000)
8388593
sage: max_det_prime(10)
8388593
"""
# See #14032: LinBox now uses a constant bound of 2^23.
# This is the largest prime less than that bound.
return Integer(8388593)
def det_from_modp_and_divisor(A, d, p, z_mod, moduli, z_so_far=ZZ(1), N_so_far=ZZ(1)):
"""
This is used for internal purposes for computing determinants
quickly (with the hybrid p-adic / multimodular algorithm).
INPUT:
- A -- a square matrix
- d -- a divisor of the determinant of A
- p -- a prime
- z_mod -- values of det/d (mod ...)
- moduli -- the moduli so far
- z_so_far -- for a modulus p in the list moduli,
(z_so_far mod p) is the determinant of A modulo p.
- N_so_far -- N_so_far is the product over the primes in the list moduli.
OUTPUT:
- A triple (det bound, new z_so_far, new N_so_far).
EXAMPLES::
sage: a = matrix(ZZ, 3, [6, 1, 2, -56, -2, -1, -11, 2, -3])
sage: factor(a.det())
-1 * 13 * 29
sage: d = 13
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.det_from_modp_and_divisor(a, d, 97, [], [])
(-377, -29, 97)
sage: a.det()
-377
"""
tm = verbose("Multimodular stage of det calculation -- using p = %s" % p, level=2)
z = A.mod(p).det() / d
z = z.lift()
z_mod.append(z)
moduli.append(p)
z = CRT_list([z_so_far, z], [N_so_far, p])
N = N_so_far*p
if z > N // 2:
z -= N
verbose("Finished multimodular det for p = %s" % p, tm, level=2)
return (d * z, z, N)
def det_given_divisor(A, d, proof=True, stabilize=2):
"""
Given a divisor d of the determinant of A, compute the determinant of A.
INPUT:
- ``A`` -- a square integer matrix
- ``d`` -- a nonzero integer that is assumed to divide the determinant of A
- ``proof`` -- bool (default: True) compute det modulo enough primes
so that the determinant is computed provably correctly (via the
Hadamard bound). It would be VERY hard for ``det()`` to fail even
with proof=False.
- ``stabilize`` -- int (default: 2) if proof = False, then compute
the determinant modulo `p` until ``stabilize`` successive modulo
determinant computations stabilize.
OUTPUT:
integer -- determinant
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: a = matrix(ZZ,3,[-1, -1, -1, -20, 4, 1, -1, 1, 2])
sage: matrix_integer_dense_hnf.det_given_divisor(a, 3)
-30
sage: matrix_integer_dense_hnf.det_given_divisor(a, 3, proof=False)
-30
sage: matrix_integer_dense_hnf.det_given_divisor(a, 3, proof=False, stabilize=1)
-30
sage: a.det()
-30
Here we illustrate proof=False giving a wrong answer::
sage: p = matrix_integer_dense_hnf.max_det_prime(2)
sage: q = previous_prime(p)
sage: a = matrix(ZZ, 2, [p, 0, 0, q])
sage: p * q
70368442188091
sage: matrix_integer_dense_hnf.det_given_divisor(a, 1, proof=False, stabilize=2)
0
This still works, because we do not work modulo primes that divide
the determinant bound, which is found using a p-adic algorithm::
sage: a.det(proof=False, stabilize=2)
70368442188091
3 primes is enough::
sage: matrix_integer_dense_hnf.det_given_divisor(a, 1, proof=False, stabilize=3)
70368442188091
sage: matrix_integer_dense_hnf.det_given_divisor(a, 1, proof=False, stabilize=5)
70368442188091
sage: matrix_integer_dense_hnf.det_given_divisor(a, 1, proof=True)
70368442188091
TESTS::
sage: m = diagonal_matrix(ZZ, 68, [2]*66 + [1,1])
sage: m.det()
73786976294838206464
"""
p = max_det_prime(A.nrows())
z_mod = []
moduli = []
assert d != 0
z_so_far = 1
N_so_far = 1
if proof:
N = 1
B = (2 * 10**A.hadamard_bound()) // d + 1
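        # hadamard_bound() returns an integer n with |det(A)| <= 10**n, so
        # 2 * 10**n covers both signs of the determinant; dividing by the
        # known divisor d leaves the size of det(A)/d that must be resolved.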
dd = d
# bad verbose statement, since computing the log overflows!
est = int(RR(B).log() / RR(p).log()) + 1
cnt = 1
verbose("Multimodular det -- need to use about %s primes." % est,
level=1)
while N < B:
if d % p != 0:
tm = cputime()
dd, z_so_far, N_so_far = det_from_modp_and_divisor(A, d, p, z_mod, moduli, z_so_far, N_so_far)
N *= p
verbose("computed det mod p=%s which is %s (of about %s)" % (p, cnt, est), tm)
p = previous_prime(p)
cnt += 1
return dd
else:
val = []
while True:
if d % p:
tm = cputime()
dd, z_so_far, N_so_far = det_from_modp_and_divisor(A, d, p, z_mod, moduli, z_so_far, N_so_far)
verbose("computed det mod %s" % p, tm)
val.append(dd)
if len(val) >= stabilize and len(set(val[-stabilize:])) == 1:
return val[-1]
p = previous_prime(p)
def det_padic(A, proof=True, stabilize=2):
"""
Return the determinant of A, computed using a p-adic/multimodular
algorithm.
INPUT:
- ``A`` -- a square matrix
- ``proof`` -- boolean
- ``stabilize`` (default: 2) -- if proof False, number of successive primes so that
CRT det must stabilize.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as h
sage: a = matrix(ZZ, 3, [1..9])
sage: h.det_padic(a)
0
sage: a = matrix(ZZ, 3, [1,2,5,-7,8,10,192,5,18])
sage: h.det_padic(a)
-3669
sage: a.determinant(algorithm='ntl')
-3669
"""
if not A.is_square():
raise ValueError("A must be a square matrix")
r = A.rank()
if r < A.nrows():
return ZZ.zero()
v = random_matrix(ZZ, A.nrows(), 1)
d = A._solve_right_nonsingular_square(v, check_rank=False).denominator()
return det_given_divisor(A, d, proof=proof, stabilize=stabilize)
def double_det(A, b, c, proof):
"""
Compute the determinants of the stacked integer matrices
A.stack(b) and A.stack(c).
INPUT:
- A -- an (n-1) x n matrix
- b -- an 1 x n matrix
- c -- an 1 x n matrix
- proof -- whether or not to compute the det modulo enough times to
provably compute the determinant.
OUTPUT:
- a pair of two integers.
EXAMPLES::
sage: from sage.matrix.matrix_integer_dense_hnf import double_det
sage: A = matrix(ZZ, 2, 3, [1,2,3, 4,-2,5])
sage: b = matrix(ZZ, 1, 3, [1,-2,5])
sage: c = matrix(ZZ, 1, 3, [8,2,10])
sage: A.stack(b).det()
-48
sage: A.stack(c).det()
42
sage: double_det(A, b, c, False)
(-48, 42)
"""
# We use the "two for the price of one" algorithm, which I made up. (William Stein)
# This is a clever trick! First we transpose everything. Then
# we use that if [A|b]*v = c then [A|c]*w = b with w easy to write down!
    # In fact w is obtained from v by dividing all entries by -v[n], where n is the
# number of rows of v, and *also* dividing the last entry of w by v[n] again.
# See this as an algebra exercise where you have to think of matrix vector
# multiply as "linear combination of columns".
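    # Sketch of that algebra: write v = (v', v_n).  Solving [A|b]*v = -c (as
    # done below) gives A*v' + v_n*b = -c, i.e. c = -A*v' - v_n*b.  Seeking
    # w = (w', w_n) with [A|c]*w = b, substitute for c:
    #     A*(w' - w_n*v') - (w_n*v_n)*b = b,
    # which holds when w_n = -1/v_n and w' = -v'/v_n -- that is, w = -v/v_n
    # with the last entry divided by v_n once more, exactly as computed below.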
A = A.transpose()
b = b.transpose()
c = c.transpose()
t = verbose('starting double det')
B = A.augment(b)
v = B.solve_right(-c)
db = det_given_divisor(B, v.denominator(), proof=proof)
n = v.nrows()
vn = v[n - 1, 0]
w = (-1 / vn) * v
w[n - 1] = w[n - 1] / vn
dc = det_given_divisor(A.augment(c), w.denominator(), proof=proof)
verbose('finished double det', t)
return (db, dc)
def add_column_fallback(B, a, proof):
"""
Simplistic version of add_column, in case the powerful clever one
fails (e.g., B is singular).
INPUT:
B -- a square matrix (may be singular)
a -- an n x 1 matrix, where B has n rows
proof -- bool; whether to prove result correct
OUTPUT:
x -- a vector such that H' = H_B.augment(x) is the HNF of A = B.augment(a).
EXAMPLES::
sage: B = matrix(ZZ,3, [-1, -1, 1, -3, 8, -2, -1, -1, -1])
sage: a = matrix(ZZ,3,1, [1,2,3])
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.add_column_fallback(B, a, True)
[-3]
[-7]
[-2]
sage: matrix_integer_dense_hnf.add_column_fallback(B, a, False)
[-3]
[-7]
[-2]
sage: B.augment(a).hermite_form()
[ 1 1 1 -3]
[ 0 11 1 -7]
[ 0 0 2 -2]
"""
tt = verbose('add column fallback...')
W = B.augment(matrix(ZZ,B.nrows(),a.list()))
H, _ = hnf(W, proof)
C = H.matrix_from_columns([H.ncols()-1])
verbose('finished add column fallback', tt)
return C
def solve_system_with_difficult_last_row(B, a):
"""
Solve B*x = a when the last row of `B` contains huge entries using
a clever trick that reduces the problem to solve C*x = a where `C`
is `B` but with the last row replaced by something small, along
with one easy null space computation. The latter are both solved
`p`-adically.
INPUT:
- B -- a square n x n nonsingular matrix with painful big bottom row.
- a -- an n x 1 column matrix
OUTPUT:
- the unique solution to B*x = a.
EXAMPLES::
sage: from sage.matrix.matrix_integer_dense_hnf import solve_system_with_difficult_last_row
sage: B = matrix(ZZ, 3, [1,2,4, 3,-4,7, 939082,2930982,132902384098234])
sage: a = matrix(ZZ,3,1, [1,2,5])
sage: z = solve_system_with_difficult_last_row(B, a)
sage: z
[ 106321906985474/132902379815497]
[132902385037291/1329023798154970]
[ -5221794/664511899077485]
sage: B*z
[1]
[2]
[5]
"""
# Here's how:
# 1. We make a copy of B but with the last *nasty* row of B replaced
# by a random very nice row.
C = copy(B)
while True:
C[C.nrows()-1] = random_matrix(ZZ,1,C.ncols()).row(0)
# 2. Then we find the unique solution to C * x = a
try:
x = C.solve_right(a)
except ValueError:
verbose("Try difficult solve again with different random vector")
else:
break
# 3. We next delete the last row of B and find a basis vector k
# for the 1-dimensional kernel.
D = B.matrix_from_rows(range(C.nrows()-1))
N = D._rational_kernel_iml()
if N.ncols() != 1:
verbose("Try difficult solve again with different random vector")
return solve_system_with_difficult_last_row(B, a)
k = N.matrix_from_columns([0])
# 4. The sought for solution z to B*z = a is some linear combination
#
# z = x + alpha*k
#
# of x and k, where k is the above fixed basis for the kernel of D.
# Setting w to be the last row of B, this column vector z satisfies
#
# w * z = a'
#
# where a' is the last entry of a. Thus
#
# w * (x + alpha*k) = a'
#
# so w * x + alpha*w*k = a'
# so alpha*w*k = a' - w*x.
w = B[-1] # last row of B
a_prime = a[-1]
lhs = w*k
rhs = a_prime - w * x
if lhs[0] == 0:
verbose("Try difficult solve again with different random vector")
return solve_system_with_difficult_last_row(B, a)
alpha = rhs[0] / lhs[0]
z = x + alpha*k
return z
def add_column(B, H_B, a, proof):
"""
The add column procedure.
INPUT:
- B -- a square matrix (may be singular)
- H_B -- the Hermite normal form of B
- a -- an n x 1 matrix, where B has n rows
- proof -- bool; whether to prove result correct, in case we use fallback method.
OUTPUT:
- x -- a vector such that H' = H_B.augment(x) is the HNF of A = B.augment(a).
EXAMPLES::
sage: B = matrix(ZZ, 3, 3, [1,2,5, 0,-5,3, 1,1,2])
sage: H_B = B.echelon_form()
sage: a = matrix(ZZ, 3, 1, [1,8,-2])
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: x = hnf.add_column(B, H_B, a, True); x
[18]
[ 3]
[23]
sage: H_B.augment(x)
[ 1 0 17 18]
[ 0 1 3 3]
[ 0 0 18 23]
sage: B.augment(a).echelon_form()
[ 1 0 17 18]
[ 0 1 3 3]
[ 0 0 18 23]
"""
verbose('starting add_column')
if B.rank() < B.nrows():
return add_column_fallback(B, a, proof)
else:
z = solve_system_with_difficult_last_row(B, a)
zd, d = z._clear_denom()
x = H_B * zd
if d != 1:
for i in range(x.nrows()):
x[i, 0] = x[i, 0] / d
return x
def add_row(A, b, pivots, include_zero_rows):
"""
The add row procedure.
INPUT:
    - A -- a matrix in Hermite normal form with n columns
- b -- an n x 1 row matrix
    - pivots -- sorted list of integers; the pivot positions of A.
    - include_zero_rows -- bool; whether to include zero rows in the output.
OUTPUT:
- H -- the Hermite normal form of A.stack(b).
- new_pivots -- the pivot columns of H.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: A = matrix(ZZ, 2, 3, [-21, -7, 5, 1,20,-7])
sage: b = matrix(ZZ, 1,3, [-1,1,-1])
sage: hnf.add_row(A, b, A.pivots(), True)
(
[ 1 6 29]
[ 0 7 28]
[ 0 0 46], [0, 1, 2]
)
sage: A.stack(b).echelon_form()
[ 1 6 29]
[ 0 7 28]
[ 0 0 46]
"""
t = verbose('add hnf row')
H, pivs = A._add_row_and_maintain_echelon_form(b.row(0), pivots)
if include_zero_rows and H.nrows() != A.nrows() + 1:
H = H.matrix_from_rows(range(A.nrows() + 1))
verbose('finished add hnf row', t)
return H, pivs
def pivots_of_hnf_matrix(H):
"""
Return the pivot columns of a matrix H assumed to be in HNF.
INPUT:
- H -- a matrix that must be HNF
OUTPUT:
- list -- list of pivots
EXAMPLES::
sage: H = matrix(ZZ, 3, 5, [1, 0, 0, 45, -36, 0, 1, 0, 131, -107, 0, 0, 0, 178, -145]); H
[ 1 0 0 45 -36]
[ 0 1 0 131 -107]
[ 0 0 0 178 -145]
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.pivots_of_hnf_matrix(H)
[0, 1, 3]
"""
pivots = []
r = -1
for j in range(H.ncols()):
# Find first nonzero position (counting from bottom) in the j-th column
for i in reversed(range(H.nrows())):
if H[i, j]:
if i > r:
pivots.append(j)
r = i
else:
break
return pivots
def hnf_square(A, proof):
"""
INPUT:
- a nonsingular n x n matrix A over the integers.
OUTPUT:
- the Hermite normal form of A.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: A = matrix(ZZ, 3, [-21, -7, 5, 1,20,-7, -1,1,-1])
sage: hnf.hnf_square(A, False)
[ 1 6 29]
[ 0 7 28]
[ 0 0 46]
sage: A.echelon_form()
[ 1 6 29]
[ 0 7 28]
[ 0 0 46]
"""
n = A.nrows()
m = A.ncols()
if n != m:
raise ValueError("A must be square.")
# Small cases -- do not use this algorithm
if n <= 3:
return A.echelon_form(algorithm="pari")
if A.rank() < A.nrows():
raise ValueError("matrix must have full rank")
t = verbose("starting slicings")
B = A.matrix_from_rows(range(m-2)).matrix_from_columns(range(n-1))
c = A.matrix_from_rows([m-2]).matrix_from_columns(range(n-1))
d = A.matrix_from_rows([m-1]).matrix_from_columns(range(n-1))
b = A.matrix_from_columns([n-1]).matrix_from_rows(range(m-2))
verbose("done slicing", t)
try:
d1, d2 = double_det(B, c, d, proof=proof)
except (ValueError, ZeroDivisionError):
d1 = B.stack(c).det(proof=proof)
d2 = B.stack(d).det(proof=proof)
g, k, l = d1._xgcd(d2, minimal=True)
W = B.stack(k * c + l * d)
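    # Since det is linear in the last row, det(W) = k*d1 + l*d2 = gcd(d1, d2)
    # = g, the smallest positive determinant obtainable from integer
    # combinations of the two candidate last rows.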
verbose("submatrix det: g=%s" % g)
CUTOFF = 2147483647 # 2^31-1
if g == 0:
# Big trouble -- matrix is not invertible
# Since we have no good conditioning code at present,
# in this case we just fall back to using pari.
H = W.echelon_form(algorithm='pari')
elif 2 * g > CUTOFF:
# Unlikely that g will be large on even slightly random input
# if it is, we fallback to the traditional algorithm.
# A nasty example is A = n*random_matrix(ZZ,m), where
# this algorithm gets killed. This is not random input though.
f = W.gcd()
g = g / (f**W.nrows())
if 2 * g <= CUTOFF:
verbose("Found common factor of %s -- dividing out; get new g = %s" % (f, g))
W0 = (W / f).change_ring(ZZ)
H = W0._hnf_mod(2 * g)
H *= f
else:
verbose("Falling back to PARI HNF since input matrix is ill conditioned for p-adic hnf algorithm.")
# We need more clever preconditioning?
# It is important to *not* just do the submatrix, since
# the whole rest of the algorithm will likely be very slow in
# weird cases where the det is large.
# E.g., matrix all of whose rows but 1 are multiplied by some
# fixed scalar n.
raise NotImplementedError("fallback to PARI!")
# H = W.hermite_form(algorithm='pari')
else:
H = W._hnf_mod(2 * g)
x = add_column(W, H, b.stack(matrix(1, 1,
[k*A[m-2,m-1] + l*A[m-1,m-1]])),
proof)
Hprime = H.augment(x)
pivots = pivots_of_hnf_matrix(Hprime)
Hprime, pivots = add_row(Hprime, A.matrix_from_rows([m - 2]),
pivots, include_zero_rows=False)
Hprime, pivots = add_row(Hprime, A.matrix_from_rows([m - 1]),
pivots, include_zero_rows=False)
return Hprime.matrix_from_rows(range(m))
def interleave_matrices(A, B, cols1, cols2):
"""
INPUT:
- A, B -- matrices with the same number of rows
- cols1, cols2 -- disjoint lists of integers
OUTPUT:
construct a new matrix C by sticking the columns
of A at the positions specified by cols1 and the
columns of B at the positions specified by cols2.
EXAMPLES::
sage: A = matrix(ZZ, 2, [1,2,3,4]); B = matrix(ZZ, 2, [-1,5,2,3])
sage: A
[1 2]
[3 4]
sage: B
[-1 5]
[ 2 3]
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: hnf.interleave_matrices(A, B, [1,3], [0,2])
[-1 1 5 2]
[ 2 3 3 4]
"""
D = A.augment(B)
w = cols1 + cols2
v = [w.index(i) for i in range(len(cols1) + len(cols2))]
return D.matrix_from_columns(v)
def probable_pivot_rows(A):
"""
Return rows of A that are very likely to be pivots.
This really finds the pivots of A modulo a random prime.
INPUT:
- A -- a matrix
OUTPUT:
a tuple of integers
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: a = matrix(ZZ,3,[0, -1, -1, 0, -20, 1, 0, 1, 2])
sage: a
[ 0 -1 -1]
[ 0 -20 1]
[ 0 1 2]
sage: matrix_integer_dense_hnf.probable_pivot_rows(a)
(0, 1)
"""
return probable_pivot_columns(A.transpose())
def probable_pivot_columns(A):
"""
INPUT:
- A -- a matrix
OUTPUT:
a tuple of integers
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: a = matrix(ZZ,3,[0, -1, -1, 0, -20, 1, 0, 1, 2])
sage: a
[ 0 -1 -1]
[ 0 -20 1]
[ 0 1 2]
sage: matrix_integer_dense_hnf.probable_pivot_columns(a)
(1, 2)
"""
p = ZZ.random_element(10007, 46000).next_prime()
return A._reduce(p).pivots()
def ones(H, pivots):
"""
Find all 1 pivot columns of the matrix H in Hermite form, along
with the corresponding rows, and also the non 1 pivot columns and
    non-pivot rows. Here a 1 pivot column is a pivot column such that
    its leading (bottom-most) entry is 1.
INPUT:
- H -- matrix in Hermite form
- pivots -- list of integers (all pivot positions of H).
OUTPUT:
    4-tuple of integer lists: onecol, onerow, non_onecol, non_onerow
EXAMPLES::
sage: H = matrix(ZZ, 3, 5, [1, 0, 0, 45, -36, 0, 1, 0, 131, -107, 0, 0, 0, 178, -145]); H
[ 1 0 0 45 -36]
[ 0 1 0 131 -107]
[ 0 0 0 178 -145]
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.ones(H, [0,1,3])
([0, 1], [0, 1], [2], [2])
"""
# Find the "onecol" pivot columns of H, i.e., the columns
# that contain exactly one "1" entry and all other entries 0.
onecol = []
onerow = []
i = 0
for c in pivots:
if H[i, c] == 1:
onecol.append(c)
onerow.append(i)
i += 1
onecol_set = set(onecol)
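    # Pair each remaining pivot row with one non-onecol column (truncating the
    # column list) so that extract_ones_data can form a square submatrix D.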
non_onerow = [j for j in range(len(pivots)) if j not in onerow]
non_onecol = [j for j in range(H.ncols()) if j not in onecol_set][:len(non_onerow)]
return onecol, onerow, non_onecol, non_onerow
def extract_ones_data(H, pivots):
"""
Compute ones data and corresponding submatrices of H.
    This is used to optimize the :func:`add_row` function.
INPUT:
- H -- a matrix in HNF
- pivots -- list of all pivot column positions of H
OUTPUT:
C, D, E, onecol, onerow, non_onecol, non_onerow
where onecol, onerow, non_onecol, non_onerow are as for
the ones function, and C, D, E are matrices:
- C -- submatrix of all non-onecol columns and onecol rows
- D -- all non-onecol columns and other rows
- E -- inverse of D
If D is not invertible or there are 0 or more than 2 non onecols,
then C, D, and E are set to None.
EXAMPLES::
sage: H = matrix(ZZ, 3, 4, [1, 0, 0, 7, 0, 1, 5, 2, 0, 0, 6, 6])
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.extract_ones_data(H, [0,1,2])
(
[0]
[5], [6], [1/6], [0, 1], [0, 1], [2], [2]
)
Here we get None's since the (2,2) position submatrix is not invertible.
sage: H = matrix(ZZ, 3, 5, [1, 0, 0, 45, -36, 0, 1, 0, 131, -107, 0, 0, 0, 178, -145]); H
[ 1 0 0 45 -36]
[ 0 1 0 131 -107]
[ 0 0 0 178 -145]
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.extract_ones_data(H, [0,1,3])
(None, None, None, [0, 1], [0, 1], [2], [2])
"""
onecol, onerow, non_onecol, non_onerow = ones(H, pivots)
verbose('extract_ones -- got submatrix of size %s' % len(non_onecol))
if len(non_onecol) in [1, 2]:
# Extract submatrix of all non-onecol columns and onecol rows
C = H.matrix_from_rows_and_columns(onerow, non_onecol)
# Extract submatrix of all non-onecol columns and other rows
D = H.matrix_from_rows_and_columns(non_onerow, non_onecol).transpose()
tt = verbose("extract ones -- INVERT %s x %s" % (len(non_onerow), len(non_onecol)), level=1)
try:
E = D**(-1)
except ZeroDivisionError:
C = D = E = None
verbose("done inverting", tt, level=1)
return C, D, E, onecol, onerow, non_onecol, non_onerow
else:
return None, None, None, onecol, onerow, non_onecol, non_onerow
def is_in_hnf_form(H, pivots):
"""
Return whether the matrix ``H`` is in Hermite normal form
with given pivot columns.
INPUT:
- ``H`` -- matrix
- ``pivots`` -- sorted list of integers
OUTPUT:
boolean
EXAMPLES::
sage: a = matrix(ZZ,3,5,[-2, -6, -3, -17, -1, 2, -1, -1, -2, -1, -2, -2, -6, 9, 2])
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.is_in_hnf_form(a,range(3))
False
sage: e = a.hermite_form(); p = a.pivots()
sage: matrix_integer_dense_hnf.is_in_hnf_form(e, p)
True
"""
tt = verbose('testing if matrix is in HNF')
r = 0
pivots_set = set(pivots)
for j in range(H.ncols()):
if j in pivots_set:
for i in range(r + 1, H.nrows()):
if H[i, j]:
verbose('not HNF because nonzeros below pivot position',tt)
return False
for i in range(r):
if H[i, j] < 0 or H[i, j] >= H[r, j]:
verbose('not HNF because negative or too big above pivot position',tt)
return False
r += 1
else:
for i in range(r, H.nrows()):
if H[i, j]:
verbose('not HNF nonzero in wrong place in nonpivot column',tt)
return False
verbose('done verifying in HNF -- yes', tt)
return True
def probable_hnf(A, include_zero_rows, proof):
"""
Return the HNF of A or raise an exception if something involving
the randomized nature of the algorithm goes wrong along the way.
    Calling this function again a few times should result in it
working, at least if proof=True.
INPUT:
- A -- a matrix
- include_zero_rows -- bool
- proof -- bool
OUTPUT:
the Hermite normal form of A.
cols -- pivot columns
EXAMPLES::
sage: a = matrix(ZZ,4,3,[-1, -1, -1, -20, 4, 1, -1, 1, 2,1,2,3])
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.probable_hnf(a, True, True)
(
[1 0 0]
[0 1 0]
[0 0 1]
[0 0 0], [0, 1, 2]
)
sage: matrix_integer_dense_hnf.probable_hnf(a, False, True)
(
[1 0 0]
[0 1 0]
[0 0 1], [0, 1, 2]
)
sage: matrix_integer_dense_hnf.probable_hnf(a, False, False)
(
[1 0 0]
[0 1 0]
[0 0 1], [0, 1, 2]
)
"""
# Find left-most full rank submatrix by working modulo a prime
rows = list(probable_pivot_rows(A))
B = A.matrix_from_rows(rows)
cols = list(probable_pivot_columns(B))
C = B.matrix_from_columns(cols)
# Now C is a submatrix of A that has full rank and is square.
# We compute the HNF of C, which is a square nonsingular matrix.
try:
H = hnf_square(C, proof=proof)
except NotImplementedError:
# raise
# this signals that we must fallback to PARI
verbose("generic random modular HNF algorithm failed -- we fall back to PARI")
H = A.hermite_form(algorithm='pari', include_zero_rows=include_zero_rows, proof=proof)
return H, H.pivots()
# The transformation matrix to HNF is the unique
# matrix U such that U * C = H, i.e., U = H*C^(-1).
if len(cols) < B.ncols():
# We compute the HNF of B by multiplying the matrix D
# got from the columns not in C by U:
# We want to compute X = U*D. But U = H*C^(-1),
# so X = U*D = H*C^(-1)*D.
# So C*H^(-1)*X = D
# find y s.t C*y = D
# H^(-1)*X = y ===> X = H*y
#
cols_set = set(cols)
cols2 = [i for i in range(B.ncols()) if i not in cols_set]
D = B.matrix_from_columns(cols2)
Y = C.solve_right(D)
H2 = H * Y
H2 = H2.change_ring(ZZ)
        # The HNF of B is obtained by assembling together
# the matrices H and H2.
H = interleave_matrices(H, H2, cols, cols2)
pivots = pivots_of_hnf_matrix(H)
# Now H is the HNF of the matrix B.
# Finally we add all remaining rows of A to H using
# the add_row function.
C, D, E, onecol, onerow, non_onecol, non_onerow = extract_ones_data(H, cols)
if not proof and len(non_onecol) == 0:
# Identity matrix -- done
verbose("hnf -- got identity matrix -- early abort (0)")
if include_zero_rows:
H = pad_zeros(H, A.nrows())
return H, pivots
rows_set = set(rows)
for i in range(A.nrows()):
if i not in rows_set:
v = A.matrix_from_rows([i])
if v == 0:
continue
if E is None:
H, pivots = add_row(H, v, pivots, include_zero_rows=False)
C, D, E, onecol, onerow, non_onecol, non_onerow = extract_ones_data(H, pivots)
if not proof and len(non_onecol) == 0:
# Identity matrix -- done
verbose("hnf -- got identity matrix -- early abort (1)")
if include_zero_rows:
H = pad_zeros(H, A.nrows())
return H, pivots
else:
z = A.matrix_from_rows_and_columns([i], non_onecol)
w = A.matrix_from_rows_and_columns([i], onecol)
tt = verbose("checking denom (%s x %s)" % (D.nrows(),
D.ncols()))
Y = (z - w * C).transpose()
k = E * Y
verbose("done checking denom",tt)
if k.denominator() != 1:
H, pivots = add_row(H, v, pivots, include_zero_rows=False)
D = H.matrix_from_rows_and_columns(non_onerow, non_onecol).transpose()
nn = ones(H, pivots)
if not proof and len(nn[2]) == 0:
verbose("hnf -- got identity matrix -- early abort (2)")
if include_zero_rows:
H = pad_zeros(H, A.nrows())
return H, pivots
if include_zero_rows:
H = pad_zeros(H, A.nrows())
return H, pivots
def pad_zeros(A, nrows):
"""
Add zeros to the bottom of A so that the resulting matrix has nrows.
INPUT:
- A -- a matrix
- nrows -- an integer that is at least as big as the number of rows of A.
OUTPUT:
a matrix with nrows rows.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: a = matrix(ZZ, 2, 4, [1, 0, 0, 7, 0, 1, 5, 2])
sage: matrix_integer_dense_hnf.pad_zeros(a, 4)
[1 0 0 7]
[0 1 5 2]
[0 0 0 0]
[0 0 0 0]
sage: matrix_integer_dense_hnf.pad_zeros(a, 2)
[1 0 0 7]
[0 1 5 2]
"""
nz = nrows - A.nrows()
if nz == 0:
return A
if nz < 0:
return A.matrix_from_rows(range(nrows))
return A.stack(matrix(ZZ, nz, A.ncols()))
def hnf(A, include_zero_rows=True, proof=True):
"""
Return the Hermite Normal Form of a general integer matrix A,
along with the pivot columns.
INPUT:
- A -- an n x m matrix A over the integers.
- include_zero_rows -- bool (default: True) whether or not to include zero
rows in the output matrix
- proof -- whether or not to prove the result correct.
OUTPUT:
- matrix -- the Hermite normal form of A
- pivots -- the pivot column positions of A
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: a = matrix(ZZ,3,5,[-2, -6, -3, -17, -1, 2, -1, -1, -2, -1, -2, -2, -6, 9, 2])
sage: matrix_integer_dense_hnf.hnf(a)
(
[ 2 0 26 -75 -10]
[ 0 1 27 -73 -9]
[ 0 0 37 -106 -13], [0, 1, 2]
)
sage: matrix_integer_dense_hnf.hnf(a.transpose())
(
[1 0 0]
[0 1 0]
[0 0 1]
[0 0 0]
[0 0 0], [0, 1, 2]
)
sage: matrix_integer_dense_hnf.hnf(a.transpose(), include_zero_rows=False)
(
[1 0 0]
[0 1 0]
[0 0 1], [0, 1, 2]
)
"""
if A.nrows() <= 1:
np = A.nonzero_positions()
if not np:
pivots = []
if not include_zero_rows:
A = A.new_matrix(0) # 0 rows
else:
i,j = np[0]
if A[i,j] < 0:
A = -A
pivots = [j]
return A, pivots
if not proof:
H, pivots = probable_hnf(A, include_zero_rows=include_zero_rows,
proof=False)
        if not include_zero_rows and len(pivots) > H.nrows():
            return H.matrix_from_rows(range(len(pivots))), pivots
        return H, pivots
while True:
H, pivots = probable_hnf(A, include_zero_rows=include_zero_rows,
proof=True)
if is_in_hnf_form(H, pivots):
if not include_zero_rows and len(pivots) > H.nrows():
H = H.matrix_from_rows(range(len(pivots)))
return H, pivots
verbose("After attempt the return matrix is not in HNF form since pivots must have been wrong. We try again.")
def hnf_with_transformation(A, proof=True):
"""
Compute the HNF H of A along with a transformation matrix U
such that U*A = H.
INPUT:
- A -- an n x m matrix A over the integers.
- proof -- whether or not to prove the result correct.
OUTPUT:
- matrix -- the Hermite normal form H of A
- U -- a unimodular matrix such that U * A = H
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: A = matrix(ZZ, 2, [1, -5, -10, 1, 3, 197]); A
[ 1 -5 -10]
[ 1 3 197]
sage: H, U = matrix_integer_dense_hnf.hnf_with_transformation(A)
sage: H
[ 1 3 197]
[ 0 8 207]
sage: U
[ 0 1]
[-1 1]
sage: U*A
[ 1 3 197]
[ 0 8 207]
"""
# All we do is augment the input matrix with the identity matrix of the appropriate rank on the right.
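    # Why this works (sketch): the HNF computation applies one unimodular
    # matrix U to the augmented input, so U*[A | I] = [U*A | U] = [H | U];
    # slicing the result therefore yields both H and the transformation U.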
C = A.augment(identity_matrix(ZZ, A.nrows()))
H, _ = hnf(C, include_zero_rows=True, proof=proof)
U = H.matrix_from_columns(range(A.ncols(), H.ncols()))
H2 = H.matrix_from_columns(range(A.ncols()))
return H2, U
def hnf_with_transformation_tests(n=10, m=5, trials=10):
"""
Use this to randomly test that hnf with transformation matrix
is working.
EXAMPLES::
sage: from sage.matrix.matrix_integer_dense_hnf import hnf_with_transformation_tests
sage: hnf_with_transformation_tests(n=15, m=10, trials=10)
0 1 2 3 4 5 6 7 8 9
"""
for i in range(trials):
print(i, end=" ")
A = random_matrix(ZZ, n, m)
H, U = hnf_with_transformation(A)
assert H == U * A
H, U = hnf_with_transformation(A, proof=False)
assert H == U * A
# -----------------------------------------------------------------------------
# Code for testing and benchmarking
# ------------------------------------------------------------------------------
def benchmark_hnf(nrange, bits=4):
"""
Run benchmark program.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: hnf.benchmark_hnf([50,100],32)
('sage', 50, 32, ...),
('sage', 100, 32, ...),
"""
b = 2**bits
for n in nrange:
a = random_matrix(ZZ, n, x=-b, y=b)
t = cputime()
h, _ = hnf(a, proof=False)
tm = cputime(t)
print('%s,' % (('sage', n, bits, tm),))
def benchmark_magma_hnf(nrange, bits=4):
"""
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as hnf
sage: hnf.benchmark_magma_hnf([50,100],32) # optional - magma
('magma', 50, 32, ...),
('magma', 100, 32, ...),
"""
from sage.interfaces.all import magma
b = 2**bits
for n in nrange:
a = magma('MatrixAlgebra(IntegerRing(),%s)![Random(%s,%s) : i in [1..%s]]' % (n, -b, b, n**2))
t = magma.cputime()
a.EchelonForm()
tm = magma.cputime(t)
print('%s,' % (('magma', n, bits, tm),))
def sanity_checks(times=50, n=8, m=5, proof=True, stabilize=2,
check_using_magma=True):
"""
Run random sanity checks on the modular p-adic HNF with tall and wide matrices
both dense and sparse.
INPUT:
- times -- number of times to randomly try matrices with each shape
- n -- number of rows
- m -- number of columns
- proof -- test with proof true
- stabilize -- parameter to pass to hnf algorithm when proof is False
- check_using_magma -- if True use Magma instead of PARI to check
correctness of computed HNF's. Since PARI's HNF is buggy and slow (as of
2008-02-16 non-pivot entries sometimes are not normalized to be
nonnegative) the default is Magma.
EXAMPLES::
sage: import sage.matrix.matrix_integer_dense_hnf as matrix_integer_dense_hnf
sage: matrix_integer_dense_hnf.sanity_checks(times=5, check_using_magma=False)
small 8 x 5
0 1 2 3 4 (done)
big 8 x 5
0 1 2 3 4 (done)
small 5 x 8
0 1 2 3 4 (done)
big 5 x 8
0 1 2 3 4 (done)
sparse 8 x 5
0 1 2 3 4 (done)
sparse 5 x 8
0 1 2 3 4 (done)
ill conditioned -- 1000*A -- 8 x 5
0 1 2 3 4 (done)
ill conditioned -- 1000*A but one row -- 8 x 5
0 1 2 3 4 (done)
"""
if check_using_magma:
from sage.interfaces.all import magma
def __do_check(v):
"""
This is used internally by the sanity check code.
"""
for i, a in enumerate(v):
global sanity
sanity = a
print(i, end=" ")
if check_using_magma:
if magma(hnf(a)[0]) != magma(a).EchelonForm():
print("bug computing hnf of a matrix")
print('a = matrix(ZZ, %s, %s, %s)' % (a.nrows(), a.ncols(),
a.list()))
return
else:
if hnf(a)[0] != a.echelon_form(algorithm='pari'):
print("bug computing hnf of a matrix")
print('a = matrix(ZZ, %s, %s, %s)' % (a.nrows(), a.ncols(),
a.list()))
return
print(" (done)")
print("small %s x %s" % (n, m))
__do_check([random_matrix(ZZ, n, m, x=-1, y=1) for _ in range(times)])
print("big %s x %s" % (n, m))
__do_check([random_matrix(ZZ, n, m, x=-2**32, y=2**32)
for _ in range(times)])
print("small %s x %s" % (m, n))
__do_check([random_matrix(ZZ, m, n, x=-1, y=1) for _ in range(times)])
print("big %s x %s" % (m, n))
__do_check([random_matrix(ZZ, m, n, x=-2**32,y=2**32)
for _ in range(times)])
print("sparse %s x %s" % (n, m))
__do_check([random_matrix(ZZ, n, m, density=0.1) for _ in range(times)])
print("sparse %s x %s" % (m, n))
__do_check([random_matrix(ZZ, m, n, density=0.1) for _ in range(times)])
print("ill conditioned -- 1000*A -- %s x %s" % (n, m))
__do_check([1000*random_matrix(ZZ, n, m, x=-1, y=1) for _ in range(times)])
print("ill conditioned -- 1000*A but one row -- %s x %s" % (n, m))
v = []
for _ in range(times):
a = 1000 * random_matrix(ZZ, n, m, x=-1, y=1)
a[a.nrows() - 1] = a[a.nrows() - 1] / 1000
v.append(a)
__do_check(v)
| 31.140673 | 119 | 0.555927 |
3e6a201ba832d4a7202125469af4a72190cfe25e
| 1,213 |
py
|
Python
|
day08.py
|
Matematik411/AdventOfCode2020
|
7af8e2240a7735b3a87777fad6f2dbe10d86ba16
|
[
"MIT"
] | 2 |
2020-12-01T21:03:19.000Z
|
2020-12-04T11:43:21.000Z
|
day08.py
|
Matematik411/AdventOfCode2020
|
7af8e2240a7735b3a87777fad6f2dbe10d86ba16
|
[
"MIT"
] | null | null | null |
day08.py
|
Matematik411/AdventOfCode2020
|
7af8e2240a7735b3a87777fad6f2dbe10d86ba16
|
[
"MIT"
] | null | null | null |
orders = []
while True:
try:
a = input().split()
orders.append([a[0], int(a[1])])
    except (EOFError, IndexError, ValueError):  # stop at end of input or a blank line
break
def run_program():
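    # Interpret the instruction list: "nop" advances, "acc" adds its argument
    # to the accumulator, "jmp" jumps by its argument.  Per-instruction
    # visited flags detect the first revisit, i.e. an infinite loop.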
acc = 0
visited = [0 for _ in range(len(orders))]
i = 0
finished = True
while True:
if i == len(orders):
print("Finished well!")
break
if visited[i]:
print("Infinite Loop!")
finished = False
break
visited[i] = 1
if orders[i][0] == "nop":
i += 1
elif orders[i][0] == "acc":
acc += orders[i][1]
i += 1
else:
i += orders[i][1]
return visited, acc, finished
# part one: run the program once; it halts at the first repeated instruction
vis_fst, acc, flag = run_program()
print(acc)
# part two: flip one visited jmp<->nop at a time and re-run until it terminates
for i, x in enumerate(vis_fst):
if x == 1 and orders[i][0] == "jmp":
orders[i][0] = "nop"
_, acc, flag = run_program()
if flag:
print(acc)
break
else:
orders[i][0] = "jmp"
elif x == 1 and orders[i][0] == "nop":
orders[i][0] = "jmp"
_, acc, flag = run_program()
if flag:
print(acc)
break
else:
orders[i][0] = "nop"
| 20.913793 | 45 | 0.439406 |
9ae017dbb5422908eae75006fea16fa5597ddef2
| 3,174 |
py
|
Python
|
prob/mhdmodes3d/build.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 16 |
2020-02-05T22:59:21.000Z
|
2022-03-18T11:05:37.000Z
|
prob/mhdmodes3d/build.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 13 |
2020-03-06T02:10:48.000Z
|
2021-06-15T20:00:30.000Z
|
prob/mhdmodes3d/build.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 4 |
2020-02-21T04:59:44.000Z
|
2020-12-10T21:42:12.000Z
|
################################################################################
# #
# 3D LINEAR RMHD MODES #
# #
################################################################################
import sys; sys.path.append('../../script/');
sys.dont_write_bytecode = True
import bhlight as bhl
PROB = 'mhdmodes3d'
MPI = '-mpi' in sys.argv
# Tabulated EOS stuff
GAMMA = 4./3.
TABLE = '-table' in sys.argv
EOS = "EOS_TYPE_TABLE" if TABLE else "EOS_TYPE_GAMMA"
NVAR_PASSIVE = 2 if TABLE else 0
M_UNIT = 1.
L_UNIT = 1.e-2
RHOMIN, RHOMAX, NRHO = 1e-7, 1e2, 234
UMIN, UMAX, NU = 1e-7, 1e2, 136
YEMIN, YEMAX, NYE = 0.0, 0.55, 50
CRASH_ON_SOUND_SPEED = False
if TABLE:
sys.path.append('../../script/analysis')
import make_tabulated_gamma as tab
tablepath = tab.make_filename(GAMMA)
units = tab.UnitSystem(M_UNIT, L_unit = L_UNIT)
tab.make_table_u(RHOMIN, RHOMAX, NRHO,
UMIN, UMAX, NU,
YEMIN, YEMAX, NYE,
units, GAMMA, tablepath,
CRASH_ON_SOUND_SPEED)
### COMPILE TIME PARAMETERS ###
# SPATIAL RESOLUTION AND MPI DECOMPOSITION
bhl.config.set_cparm('N1TOT', 64)
bhl.config.set_cparm('N2TOT', 64)
bhl.config.set_cparm('N3TOT', 64)
bhl.config.set_cparm('N1CPU', 1)
bhl.config.set_cparm('N2CPU', 1)
bhl.config.set_cparm('N3CPU', 1)
# OPENMP PARALLELIZATION
bhl.config.set_cparm('OPENMP', True)
# COORDINATES
bhl.config.set_cparm('METRIC', 'MINKOWSKI')
# FLUID
bhl.config.set_cparm('RECONSTRUCTION', 'WENO')
bhl.config.set_cparm('X1L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1L_INFLOW', False)
bhl.config.set_cparm('X1R_INFLOW', False)
bhl.config.set_cparm('X2L_INFLOW', False)
bhl.config.set_cparm('X2R_INFLOW', False)
bhl.config.set_cparm('X3L_INFLOW', False)
bhl.config.set_cparm('X3R_INFLOW', False)
# EOS
bhl.config.set_cparm("EOS", EOS)
bhl.config.set_cparm('NVAR_PASSIVE', NVAR_PASSIVE)
### RUNTIME PARAMETERS ###
# TFINAL AND DTd ARE HARDCODED
bhl.config.set_rparm('tf', 'double', default = 5.)
bhl.config.set_rparm('dt', 'double', default = 1.e-6)
bhl.config.set_rparm('DTd', 'double', default = 5.e-1)
bhl.config.set_rparm('DTl', 'double', default = 5.e-1)
bhl.config.set_rparm('DTr', 'integer', default = 10000)
bhl.config.set_rparm('nmode', 'integer', default = 1)
#EOS
if TABLE:
bhl.config.set_rparm('eospath', 'string', default = tablepath)
bhl.config.set_rparm('L_unit', 'double', default = L_UNIT)
bhl.config.set_rparm('M_unit', 'double', default = M_UNIT)
else:
bhl.config.set_rparm('gam', 'double', default = GAMMA)
### CONFIGURE AND COMPILE ###
bhl.build(PROB)
| 34.5 | 80 | 0.596723 |
5935af385279c98416227b491606b200c7cb59cb
| 4,514 |
py
|
Python
|
90_Tower/python/tower.py
|
auryn31/basic-computer-games
|
85226984a6ac52daf82d5ab2d16d0c58c02ffaf5
|
[
"Unlicense"
] | null | null | null |
90_Tower/python/tower.py
|
auryn31/basic-computer-games
|
85226984a6ac52daf82d5ab2d16d0c58c02ffaf5
|
[
"Unlicense"
] | null | null | null |
90_Tower/python/tower.py
|
auryn31/basic-computer-games
|
85226984a6ac52daf82d5ab2d16d0c58c02ffaf5
|
[
"Unlicense"
] | null | null | null |
import sys
class Disk:
def __init__(self, size):
self.__size = size
def size(self):
return self.__size
def print(self):
print("[ %s ]" % self.size())
class Tower:
def __init__(self):
self.__disks = []
def empty(self):
return len(self.__disks) == 0
def top(self):
if self.empty():
return None
else:
return self.__disks[-1]
def add(self, disk):
if not self.empty():
t = self.top()
if disk.size() > t.size():
raise Exception(
"YOU CAN'T PLACE A LARGER DISK ON TOP OF A SMALLER ONE, IT MIGHT CRUSH IT!"
)
self.__disks.append(disk)
def pop(self):
if self.empty():
raise Exception("empty pop")
return self.__disks.pop()
def print(self):
r = "Needle: [%s]" % (", ".join([str(x.size()) for x in self.__disks]))
print(r)
print(
"""
IN THIS PROGRAM, WE SHALL REFER TO DISKS BY NUMERICAL CODE.
3 WILL REPRESENT THE SMALLEST DISK, 5 THE NEXT SIZE,
7 THE NEXT, AND SO ON, UP TO 15. IF YOU DO THE PUZZLE WITH
2 DISKS, THEIR CODE NAMES WOULD BE 13 AND 15. WITH 3 DISKS
THE CODE NAMES WOULD BE 11, 13 AND 15, ETC. THE NEEDLES
ARE NUMBERED FROM LEFT TO RIGHT, 1 TO 3. WE WILL
START WITH THE DISKS ON NEEDLE 1, AND ATTEMPT TO MOVE THEM
TO NEEDLE 3.
GOOD LUCK!
"""
)
class Game:
def __init__(self):
# use fewer sizes to make debugging easier
# self.__sizes = [3, 5, 7] # ,9,11,13,15]
self.__sizes = [3, 5, 7, 9, 11, 13, 15]
self.__sizes.sort()
self.__towers = []
self.__moves = 0
self.__towers = [Tower(), Tower(), Tower()]
self.__sizes.reverse()
for size in self.__sizes:
disk = Disk(size)
self.__towers[0].add(disk)
def winner(self):
return self.__towers[0].empty() and self.__towers[1].empty()
def print(self):
for t in self.__towers:
t.print()
def moves(self):
return self.__moves
def which_disk(self):
w = int(input("WHICH DISK WOULD YOU LIKE TO MOVE\n"))
if w in self.__sizes:
return w
else:
raise Exception()
def pick_disk(self):
which = None
while which is None:
try:
which = self.which_disk()
except Exception:
print("ILLEGAL ENTRY... YOU MAY ONLY TYPE 3,5,7,9,11,13, OR 15.\n")
valids = [t for t in self.__towers if t.top() and t.top().size() == which]
assert len(valids) in (0, 1)
if not valids:
print("THAT DISK IS BELOW ANOTHER ONE. MAKE ANOTHER CHOICE.\n")
return None
else:
assert valids[0].top().size() == which
return valids[0]
def which_tower(self):
try:
needle = int(input("PLACE DISK ON WHICH NEEDLE\n"))
tower = self.__towers[needle - 1]
except Exception:
print(
"I'LL ASSUME YOU HIT THE WRONG KEY THIS TIME. BUT WATCH IT,\nI ONLY ALLOW ONE MISTAKE.\n"
)
return None
else:
return tower
def take_turn(self):
from_tower = None
while from_tower is None:
from_tower = self.pick_disk()
to_tower = self.which_tower()
if not to_tower:
to_tower = self.which_tower()
if not to_tower:
print("I TRIED TO WARN YOU, BUT YOU WOULDN'T LISTEN.\nBYE BYE, BIG SHOT.\n")
sys.exit(0)
disk = from_tower.pop()
try:
to_tower.add(disk)
self.__moves += 1
except Exception as err:
print(err)
from_tower.add(disk)
game = Game()
while True:
game.print()
game.take_turn()
if game.winner():
print(
"CONGRATULATIONS!!\nYOU HAVE PERFORMED THE TASK IN %s MOVES.\n"
% game.moves()
)
while True:
yesno = input("TRY AGAIN (YES OR NO)\n")
if yesno.upper() == "YES":
game = Game()
break
elif yesno.upper() == "NO":
print("THANKS FOR THE GAME!\n")
sys.exit(0)
else:
print("'YES' OR 'NO' PLEASE\n")
elif game.moves() > 128:
print("SORRY, BUT I HAVE ORDERS TO STOP IF YOU MAKE MORE THAN 128 MOVES.")
sys.exit(0)
| 26.244186 | 106 | 0.525698 |
714d2b1a6d362701e70159b2349e03d0718a6ed1
| 18,863 |
py
|
Python
|
src/config/svc-monitor/svc_monitor/tests/test_ha_proxy.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | null | null | null |
src/config/svc-monitor/svc_monitor/tests/test_ha_proxy.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | null | null | null |
src/config/svc-monitor/svc_monitor/tests/test_ha_proxy.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | 1 |
2021-03-09T10:44:33.000Z
|
2021-03-09T10:44:33.000Z
|
import mock
from mock import patch
import unittest
from cfgm_common.vnc_db import DBBase
from svc_monitor import config_db
from svc_monitor import loadbalancer_agent
from vnc_api.vnc_api import *
import argparse
import ConfigParser
class HAProxyTest(unittest.TestCase):
def setUp(self):
self.vnc_lib = mock.Mock()
self.cassandra = mock.Mock()
self.logger = mock.Mock()
self.svc = mock.Mock()
self._si_pool = {}
mocked_gsc = mock.MagicMock()
mocked_gsc.uuid = 'fake-gsc-uuid'
self.vnc_lib.global_system_config_read.return_value = mocked_gsc
def no_id_side_effect(fq_name):
raise NoIdError("xxx")
# Return NoIdError while si is read for first time
self.vnc_lib.service_instance_read = \
mock.Mock(side_effect=no_id_side_effect)
self.vnc_lib.kv_retrieve.return_value = "fake-pool-vn 40.1.1.0/24"
self.vnc_lib.service_appliance_set_create.return_value = "opencontrail"
self.vnc_lib.service_appliance_set_read = \
mock.Mock(side_effect=no_id_side_effect)
self._store_si = {}
def read_si(obj_type, uuid):
return (True, [self.obj_to_dict(self._store_si[uuid[0]])])
def store_si_create(obj):
config_db.ServiceInstanceSM._cassandra.object_read = \
mock.Mock(side_effect=read_si)
obj.uuid = 'pool-si'
self._store_si[obj.uuid] = obj
def update_si_side_effect(obj):
self._store_si[obj.uuid] = obj
self.vnc_lib.service_instance_create = \
mock.Mock(side_effect=store_si_create)
self.vnc_lib.service_instance_update = \
mock.Mock(side_effect=update_si_side_effect)
self._db = {}
def read_db(id):
if id in self._db:
return self._db[id]
def put_db(id, data):
self._db[id] = data
def remove_db(id, data=None):
if data is None:
del self._db[id]
return
if self._db[id][data[0]]:
del self._db[id][data[0]]
self.cassandra.pool_driver_info_get = mock.Mock(side_effect=read_db)
self.cassandra.pool_driver_info_insert = mock.Mock(side_effect=put_db)
self.cassandra.pool_remove = mock.Mock(side_effect=remove_db)
def validate_pool_update(obj_type, obj_uuid, ref_type, ref_uuid,
ref_fq_name, operation):
self.assertEqual(obj_type, "loadbalancer-pool")
self.assertEqual(ref_type, "service_instance_refs")
pool = config_db.LoadbalancerPoolSM.get(obj_uuid)
if operation is "ADD":
si = config_db.ServiceInstanceSM.get(ref_uuid)
self.assertIsNotNone(si)
pool.service_instance = si.uuid
si.loadbalancer_pool = pool.uuid
self._si_pool[pool.uuid] = si.uuid
self.assertEqual(si.uuid, "pool-si")
elif operation is "DELETE":
pool.service_instance = None
del self._si_pool[pool.uuid]
else:
self.assertTrue(False)
return
self.vnc_lib.ref_update = mock.Mock(side_effect=validate_pool_update)
conf_parser = argparse.ArgumentParser(add_help=False)
config = ConfigParser.SafeConfigParser({'admin_token': None})
self._args, remaining_argv = conf_parser.parse_known_args()
self._args.config_sections = config
def sas_read_side_effect(obj_type, uuids):
if obj_type == 'service_appliance_set':
return (True, [{
'fq_name': ['default-global-system-config', 'opencontrail'],
'service_appliance_driver': 'svc_monitor.services.loadbalancer\
.drivers.ha_proxy.driver.OpencontrailLoadbalancerDriver'
}])
return (False, None)
DBBase.init(self.svc, None, self.cassandra)
config_db.ServiceApplianceSetSM._cassandra.object_read = \
mock.Mock(side_effect=sas_read_side_effect)
self.lb_agent = loadbalancer_agent.LoadbalancerAgent(self.svc, self.vnc_lib,
self.cassandra, self._args)
self.svc.loadbalancer_agent = self.lb_agent
sas = config_db.ServiceApplianceSetSM.get('opencontrail')
self.assertEqual(sas.driver,
"svc_monitor.services.loadbalancer.drivers.ha_proxy.driver.\
OpencontrailLoadbalancerDriver")
sas.add()
self.assertIsNotNone(self.lb_agent._loadbalancer_driver['opencontrail'])
mock_st_obj = self.create_lb_st()
# end setUp
def create_lb_st(self):
domain_name = 'default-domain'
domain_fq_name = [domain_name]
domain_obj = Domain()
domain_obj.uuid = 'fake-domain'
domain_obj.fq_name = domain_fq_name
svc_properties = ServiceTemplateType()
svc_properties.set_service_type("loadbalancer")
svc_properties.set_service_mode("in-network-nat")
svc_properties.set_service_virtualization_type("network-namespace")
svc_properties.set_image_name(None)
svc_properties.set_flavor(None)
svc_properties.set_ordered_interfaces(True)
svc_properties.set_service_scaling(True)
# set interface list
if_list = [['right', True], ['left', True]]
for itf in if_list:
if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
if_type.set_service_interface_type(itf[0])
svc_properties.add_interface_type(if_type)
st_obj = ServiceTemplate(name="haproxy-loadbalancer-template",
domain_obj=domain_obj)
st_obj.set_service_template_properties(svc_properties)
st_obj.uuid = 'haproxy-st'
st_dict = self.obj_to_dict(st_obj)
st_uuid = config_db.ServiceTemplateSM.locate(st_obj.uuid, st_dict)
return st_obj
# end
def tearDown(self):
config_db.ServiceApplianceSetSM.delete("opencontrail")
config_db.ServiceTemplateSM.delete('haproxy-st')
config_db.LoadbalancerPoolSM.reset()
config_db.VirtualIpSM.reset()
config_db.InstanceIpSM.reset()
config_db.VirtualMachineInterfaceSM.reset()
config_db.VirtualNetworkSM.reset()
config_db.ProjectSM.reset()
del self._store_si
# end tearDown
def create_pool(self, uuid, fq_name_str, project=None, vip=None, hm=None):
pool_network = self.create_vn("fake-pool-vn", "fake-pool-vn", project)
pool_obj = {}
pool_obj['fq_name'] = fq_name_str.split(':')
pool_obj['uuid'] = uuid
pool_obj['display_name'] = fq_name_str
pool_obj['parent_uuid'] = 'parent_uuid'
pool_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
pool_obj['loadbalancer_pool_provider'] = 'opencontrail'
pool_obj['loadbalancer_pool_properties'] = \
{'protocol': 'HTTP', 'subnet_id': 'subnet-id',
'loadbalancer_method': 'ROUND_ROBIN', 'admin_state': 'true',
'session_persistence': None, 'persistence_cookie_name': None}
if vip:
pool_obj['virtual_ip_back_refs']=[{'uuid': vip.uuid}]
if hm:
pool_obj['loadbalancer_healthmonitor_refs']=[{'uuid': hm.uuid}]
pool = config_db.LoadbalancerPoolSM.locate(pool_obj['uuid'], pool_obj)
return pool
# end create_pool
def create_hm_obj(self, fq_name_str):
hm_obj = {}
        hm_obj['fq_name'] = fq_name_str.split(':')
hm_obj['uuid'] = fq_name_str
hm_obj['display_name'] = fq_name_str
hm_obj['parent_uuid'] = 'parent_uuid'
hm_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
hm_obj['loadbalancer_healthmonitor_properties'] = {'delay': '5',
'expected_codes': '200',
'max_retries': '200',
'http_method': 'GET',
'timeout': '2',
'url_path': '/',
'monitor_type': 'HTTP',
'admin_state': 'true'}
return hm_obj
#end create_hm_obj
def create_hm(self, fq_name_str):
hm_obj = self.create_hm_obj(fq_name_str)
hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj)
return hm
# end create_hm
def update_pool(self, pool_obj, vip=None):
pool_obj.params['loadbalancer_method'] = 'LEAST_CONNECTIONS'
pool_obj.params['protocol'] = 'HTTPS'
pool_obj.params['admin_state'] = 'false'
# end update_pool
def update_vip(self, vip_obj, pool=None):
vip_obj.params['connection_limit'] = '100'
vip_obj.params['persistence_type'] = 'always'
vip_obj.params['admin_state'] = 'false'
# end update_vip
def create_pool_members(self, pool_name, num_members):
for i in range(num_members):
self.create_pool_member(pool_name, 'member_'+str(i),
'10.1.1.'+str(i))
# end create_pool_members
def create_pool_member(self, pool_name, member_name, member_address):
pool_member_obj = {}
pool_member_obj['fq_name'] = member_name
pool_member_obj['uuid'] = member_name
pool_member_obj['display_name'] = member_name
pool_member_obj['parent_uuid'] = pool_name
pool_member_obj['id_perms'] = \
{'enable': 'true', 'description': 'Test pool member'}
pool_member_obj['loadbalancer_member_properties'] = \
{'protocol_port': '80', 'address': member_address,
'weight': '1', 'status': 'up', 'admin_state': 'true'}
member = config_db.LoadbalancerMemberSM.locate(pool_member_obj['uuid'],
pool_member_obj)
# end create_pool_member
def create_project(self, name, uuid):
project = Project(name=name, fq_name=["default-domain", name])
project.uuid = uuid
proj_dict = self.obj_to_dict(project)
config_db.ProjectSM.locate(uuid, proj_dict)
return project
# end create_project
def create_vn(self, name, uuid, parent_obj):
network = VirtualNetwork(name=name, parent_obj=parent_obj)
network.uuid = uuid
net_dict = self.obj_to_dict(network)
config_db.VirtualNetworkSM.locate(uuid, net_dict)
return network
# end create_vn
def obj_to_dict(self, obj):
def to_json(obj):
if hasattr(obj, 'serialize_to_json'):
return obj.serialize_to_json(obj.get_pending_updates())
else:
return dict((k, v) for k, v in obj.__dict__.iteritems())
return json.loads(json.dumps(obj, default=to_json))
# end obj_to_dict
def create_vmi(self, name, uuid, parent_obj, net_obj):
vmi = VirtualMachineInterface(name=name, parent_obj=parent_obj)
vmi.set_virtual_network(net_obj)
vmi.uuid = uuid
vmi_dict = self.obj_to_dict(vmi)
config_db.VirtualMachineInterfaceSM.locate(uuid, vmi_dict)
return vmi
# end create_vmi
def create_iip(self, name, uuid, ip, net_obj, vmi_obj):
iip = InstanceIp(name=name, instance_ip_address=ip,
instance_ip_family="v4")
iip.set_virtual_network(net_obj)
iip.set_virtual_machine_interface(vmi_obj)
iip.uuid = uuid
iip_dict = self.obj_to_dict(iip)
config_db.InstanceIpSM.locate(uuid, iip_dict)
return iip
# end create_iip
def create_vip(self, vip, project, vn, vmi, ip_addr):
vip_obj = {}
vip_obj['fq_name'] = vip.split(':')
vip_obj['uuid'] = vip
vip_obj['display_name'] = vip
vip_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
vip_obj['virtual_ip_properties'] = {'status': 'UP',
'protocol_port': '80',
'subnet_id': 'subnet_id',
'protocol': 'HTTP',
'admin_state': 'true',
'connection_limit': '-1',
'persistence_type': None,
'persistence_cookie_name': None,
'address': ip_addr}
network = self.create_vn(vn, vn, project)
vmi = self.create_vmi(vmi, vmi, project, network)
iip = self.create_iip(ip_addr, ip_addr, ip_addr, network, vmi)
vip_vnc = VirtualIp.from_dict(**vip_obj)
vip_vnc.set_virtual_machine_interface(vmi)
vip_obj = self.obj_to_dict(vip_vnc)
vip_obj['parent_uuid'] = project.uuid
vip = config_db.VirtualIpSM.locate(vip, vip_obj)
return vip
# end create_vip
def test_add_delete_pool_with_members_vip(self):
project = self.create_project("fake-project", "project")
vip = self.create_vip('vip', project, 'fake-vip-vn', 'vmi', '1.1.1.1')
pool = self.create_pool("test-lb-pool",
"default-domain:admin:test-lb-pool", project, vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
self.assertEqual(len(self._db), 1)
self.assertTrue('test-lb-pool' in self._db)
self.assertEqual(self._db['test-lb-pool']['service_instance'],
'pool-si')
self.assertEqual(len(self._si_pool), 1)
si_uuid = self._si_pool['test-lb-pool']
self.assertEqual(si_uuid, 'pool-si')
si = config_db.ServiceInstanceSM.get(si_uuid)
self.assertEqual(si.service_template, 'haproxy-st')
self.assertEqual(si.params['scale_out']['max_instances'], 2)
self.assertEqual(si.params['scale_out']['auto_scale'], False)
self.assertEqual(si.params['ha_mode'], 'active-standby')
self.assertEqual(si.params['interface_list'][0]['ip_address'],
'1.1.1.1')
self.assertEqual(si.params['interface_list'][0]['virtual_network'],
'default-domain:fake-project:fake-vip-vn')
self.assertEqual(si.params['interface_list'][1]['ip_address'], None)
self.assertEqual(si.params['interface_list'][1]['virtual_network'],
'default-domain:fake-project:fake-pool-vn')
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
self.assertEqual(len(self._si_pool), 0)
self.assertEqual(len(config_db.ServiceInstanceSM._dict.keys()), 0)
# end test_add_delete_pool_with_members_vip
#
# In this test, update the vip on the pool
# Create a pool and vip
# Create a new vip and link it to the pool
# Expected result is the service instance is updated with new interface list
#
def test_update_vip(self):
project = self.create_project("fake-project", "project")
vip = self.create_vip('vip', project, 'fake-vip-vn', 'vmi', '1.1.1.1')
pool = self.create_pool("test-lb-pool",
"default-domain:admin:test-lb-pool", project, vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
self.assertEqual(len(self._db), 1)
self.assertTrue('test-lb-pool' in self._db)
self.assertEqual(self._db['test-lb-pool']['service_instance'],
'pool-si')
self.assertEqual(len(self._si_pool), 1)
si_uuid = self._si_pool['test-lb-pool']
self.assertEqual(si_uuid, 'pool-si')
si = config_db.ServiceInstanceSM.get(si_uuid)
self.assertEqual(si.service_template, 'haproxy-st')
self.assertEqual(si.params['scale_out']['max_instances'], 2)
self.assertEqual(si.params['scale_out']['auto_scale'], False)
self.assertEqual(si.params['ha_mode'], 'active-standby')
self.assertEqual(si.params['interface_list'][0]['ip_address'],
'1.1.1.1')
self.assertEqual(si.params['interface_list'][0]['virtual_network'],
'default-domain:fake-project:fake-vip-vn')
self.assertEqual(si.params['interface_list'][1]['ip_address'], None)
self.assertEqual(si.params['interface_list'][1]['virtual_network'],
'default-domain:fake-project:fake-pool-vn')
# Create a new vip
vip_new = self.create_vip('vip-new', project, 'fake-vip-vn-new', 'vmi-new', '99.1.1.1')
pool = config_db.LoadbalancerPoolSM.get('test-lb-pool')
# Link it to the pool created before
pool.virtual_ip = vip_new.uuid
vip_new.loadbalancer_pool = pool.uuid
def read_si_side_effect(id):
return self._store_si[id]
# Return the stored SI data
self.vnc_lib.service_instance_read = \
mock.Mock(side_effect=read_si_side_effect)
pool.add()
self.assertEqual(len(self._db), 1)
self.assertTrue('test-lb-pool' in self._db)
self.assertEqual(self._db['test-lb-pool']['service_instance'],
'pool-si')
self.assertEqual(len(self._si_pool), 1)
si_uuid = self._si_pool['test-lb-pool']
self.assertEqual(si_uuid, 'pool-si')
si = config_db.ServiceInstanceSM.get(si_uuid)
self.assertEqual(si.params['scale_out']['max_instances'], 2)
self.assertEqual(si.params['scale_out']['auto_scale'], False)
self.assertEqual(si.params['ha_mode'], 'active-standby')
self.assertEqual(si.params['interface_list'][0]['ip_address'],
'99.1.1.1')
self.assertEqual(si.params['interface_list'][0]['virtual_network'],
'default-domain:fake-project:fake-vip-vn-new')
self.assertEqual(si.params['interface_list'][1]['ip_address'], None)
self.assertEqual(si.params['interface_list'][1]['virtual_network'],
'default-domain:fake-project:fake-pool-vn')
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
self.assertEqual(len(self._si_pool), 0)
self.assertEqual(len(config_db.ServiceInstanceSM._dict.keys()), 0)
# end test_update_vip
#end HAProxyTest(unittest.TestCase):
| 44.279343 | 95 | 0.60791 |
d7c3f58abfb0bddabdcb04e60dec779fcb76895a
| 2,646 |
py
|
Python
|
tools/trendx_wrapper.py
|
michaelpdu/adversary_ml
|
6adec09ae5b8e30b9b58df7e8d9476793b4228bf
|
[
"Apache-2.0"
] | 1 |
2021-09-24T19:30:44.000Z
|
2021-09-24T19:30:44.000Z
|
tools/trendx_wrapper.py
|
michaelpdu/adversarial_ml
|
6adec09ae5b8e30b9b58df7e8d9476793b4228bf
|
[
"Apache-2.0"
] | null | null | null |
tools/trendx_wrapper.py
|
michaelpdu/adversarial_ml
|
6adec09ae5b8e30b9b58df7e8d9476793b4228bf
|
[
"Apache-2.0"
] | 1 |
2021-09-24T19:30:49.000Z
|
2021-09-24T19:30:49.000Z
|
import os
from ml_tool_interface import *
from trendx_tool.run import *
from housecallx import *
class TrendXWrapper(MLToolInterface):
""""""
def __init__(self, config):
self.config_ = config
self.hcx_path_ = ''
def set_hcx(self, hcx_path):
self.hcx_path_ = hcx_path
def scan_pe_file(self, sample_path):
"""
Return Value
(decision, probability)
"""
raise NotImplementedError("TrendXWrapper.scan_pe_file does not implemented!")
def scan_pe_dir(self, sample_dir):
"""
Return Value
{
file_path: (decision, probability),
file_path: (decision, probability),
...
}
"""
return_scores = {}
scores = scan_by_housecallx(self.hcx_path_, sample_dir)
for key, value in scores.items():
return_scores[os.path.join(sample_dir, key)] = value
return return_scores
def scan_pe_list(self, sample_list):
"""
Return Value
{
file_path: (decision, probability),
file_path: (decision, probability),
...
}
"""
raise NotImplementedError("TrendXWrapper.scan_pe_list does not implemented!")
def scan_script_file(self, sample_path):
"""
Return Value
(decision, probability)
"""
        m = JSModel()
        prob = m.predictfile(sample_path)
        # the score is inverted below, so a low score yields decision 1
        decision = 1 if prob < 0.5 else 0
        return (decision, 1 - prob)
def scan_script_dir(self, sample_dir):
"""
Return Value
{
file_path: (decision, probability),
file_path: (decision, probability),
...
}
"""
result = {}
for root, dirs, files in os.walk(sample_dir):
for name in files:
file_path = os.path.abspath(os.path.join(root, name))
result[file_path] = self.scan_script_file(file_path)
return result
def scan_script_list(self, sample_list):
"""
Return Value
{
file_path: (decision, probability),
file_path: (decision, probability),
...
}
"""
result = {}
for sample_path in sample_list:
sample_path = os.path.abspath(sample_path)
result[sample_path] = self.scan_script_file(sample_path)
return result
| 29.4 | 86 | 0.513228 |
6d23b83aa7d8f9a8a9ec1d8275c2811d312e1d43
| 923 |
py
|
Python
|
setup.py
|
eaybek/getthat
|
3ca34902f773ec6a40a1df0b7dac5845a22cc8e4
|
[
"MIT"
] | null | null | null |
setup.py
|
eaybek/getthat
|
3ca34902f773ec6a40a1df0b7dac5845a22cc8e4
|
[
"MIT"
] | null | null | null |
setup.py
|
eaybek/getthat
|
3ca34902f773ec6a40a1df0b7dac5845a22cc8e4
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
long_description += "```python3"
with open("sample/sample.py", "r") as fh:
long_description += fh.read()
long_description += "```"
setuptools.setup(
name="getthat",
version="0.0.2",
author="Erdem Aybek",
author_email="[email protected]",
description=" ".join(
[
"getthat i dont care if i don't have it.\n"
"(pip3 install if modulenotfound)"
]
),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/eaybek/getthat",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Development Status :: 1 - Planning",
],
python_requires=">=3.6",
)
| 27.147059 | 55 | 0.611051 |
08a8fada0fc755a8c5323bfd881a3b4f89e3ed30
| 1,100 |
py
|
Python
|
docker-manager-flask/app/libs/opreation_excel.py
|
sssbase/-
|
e4e7105cf0405d493c2ad316c27e2f72ca2c98be
|
[
"MIT"
] | 82 |
2019-11-11T09:36:12.000Z
|
2022-03-23T09:08:31.000Z
|
docker-manager-flask/app/libs/opreation_excel.py
|
sssbase/-
|
e4e7105cf0405d493c2ad316c27e2f72ca2c98be
|
[
"MIT"
] | 28 |
2019-10-10T14:02:48.000Z
|
2022-03-25T19:01:07.000Z
|
docker-manager-flask/app/libs/opreation_excel.py
|
sssbase/-
|
e4e7105cf0405d493c2ad316c27e2f72ca2c98be
|
[
"MIT"
] | 22 |
2019-11-15T01:29:03.000Z
|
2021-12-10T13:33:00.000Z
|
"""
@Time : 2018/8/29 19:08
@Author : 郭家兴
@Email : [email protected]
@File : opreation_excel.py
@Desc   : Excel operations (read/write helpers)
"""
import xlrd
from xlutils.copy import copy
class OperationExcel:
    def __init__(self,file_name):
        self.file_name = file_name
        self.workbook = self.get_workbook(file_name)
def get_workbook(self,file_name):
workbook = xlrd.open_workbook(file_name)
return workbook
def get_table(self,workbook,sheet_name=None,sheet_id=0):
table = workbook.sheets()[sheet_id]
if sheet_name:
table = workbook.sheet_by_name(sheet_name)
return table
def get_rows(self,table):
rows = table.nrows
return rows
def get_cols(self,table):
cols = table.ncols
return cols
def get_cell_value(self,table,x,y):
cell_value = table.cell_value(x, y)
return cell_value
def write_execel(self,workbook,sheetid,row,col,value):
workbook_copy = copy(workbook)
sheet_write = workbook_copy.get_sheet(sheetid)
sheet_write.write(row,col,value)
workbook_copy.save(self.file_name)
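# Minimal usage sketch (the workbook path and cell coordinates are hypothetical):
#
#   op = OperationExcel('cases.xls')
#   table = op.get_table(op.workbook, sheet_id=0)
#   print(op.get_rows(table), op.get_cols(table))
#   print(op.get_cell_value(table, 0, 0))
#   op.write_execel(op.workbook, 0, 1, 2, 'PASS')  # writes a copy and saves it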
| 26.829268 | 60 | 0.659091 |
d7df8f848929e249f972b059a8e9436eccd08ca1
| 17,241 |
py
|
Python
|
functest/utils/openstack_clean.py
|
hashnfv/hashnfv-functest
|
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
|
[
"Apache-2.0"
] | null | null | null |
functest/utils/openstack_clean.py
|
hashnfv/hashnfv-functest
|
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
|
[
"Apache-2.0"
] | null | null | null |
functest/utils/openstack_clean.py
|
hashnfv/hashnfv-functest
|
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Description:
# Cleans possible leftovers after running functest tests:
# - Nova instances
# - Glance images
# - Cinder volumes
# - Floating IPs
# - Neutron networks, subnets and ports
# - Routers
# - Users and tenants
# - Tacker VNFDs and VNFs
# - Tacker SFCs and SFC classifiers
#
# Author:
# [email protected]
#
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
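# Typical use (an assumption based on main() below): source the OpenStack
# openrc credentials, let functest write the snapshot file first (see
# OS_SNAPSHOT_FILE), then run this script to delete every resource that is
# not listed in that snapshot.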
import logging
import time
import yaml
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
logger = logging.getLogger(__name__)
OS_SNAPSHOT_FILE = CONST.openstack_snapshot_file
def separator():
logger.debug("-------------------------------------------")
def remove_instances(nova_client, default_instances):
logger.debug("Removing Nova instances...")
instances = os_utils.get_instances(nova_client)
if instances is None or len(instances) == 0:
logger.debug("No instances found.")
return
for instance in instances:
instance_name = getattr(instance, 'name')
instance_id = getattr(instance, 'id')
instance_status = getattr(instance, 'status')
instance_state = getattr(instance, 'OS-EXT-STS:task_state')
logger.debug("'%s', ID=%s " % (instance_name, instance_id))
if (instance_id not in default_instances and
instance_name not in default_instances.values() and
instance_status != 'DELETED' and
(instance_status != 'BUILD' or instance_state != 'deleting')):
logger.debug("Removing instance '%s' ..." % instance_id)
if os_utils.delete_instance(nova_client, instance_id):
logger.debug(" > Request sent.")
else:
logger.error("There has been a problem removing the "
"instance %s..." % instance_id)
else:
logger.debug(" > this is a default instance and will "
"NOT be deleted.")
timeout = 50
while timeout > 0:
instances = os_utils.get_instances(nova_client)
for instance in instances:
instance_id = getattr(instance, 'id')
if instance_id not in default_instances:
logger.debug("Waiting for instances to be terminated...")
timeout -= 1
time.sleep(1)
continue
break
def remove_images(glance_client, default_images):
logger.debug("Removing Glance images...")
images = os_utils.get_images(glance_client)
if images is None:
return -1
images = {image.id: image.name for image in images}
if len(images) == 0:
logger.debug("No images found.")
return
for image in images:
image_id = image
image_name = images.get(image_id)
logger.debug("'%s', ID=%s " % (image_name, image_id))
if (image_id not in default_images and
image_name not in default_images.values()):
logger.debug("Removing image '%s', ID=%s ..."
% (image_name, image_id))
if os_utils.delete_glance_image(glance_client, image_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the"
"image %s..." % image_id)
else:
logger.debug(" > this is a default image and will "
"NOT be deleted.")
def remove_volumes(cinder_client, default_volumes):
logger.debug("Removing Cinder volumes...")
volumes = os_utils.get_volumes(cinder_client)
if volumes is None or len(volumes) == 0:
logger.debug("No volumes found.")
return
for volume in volumes:
volume_id = getattr(volume, 'id')
volume_name = getattr(volume, 'name')
logger.debug("'%s', ID=%s " % (volume_name, volume_id))
if (volume_id not in default_volumes and
volume_name not in default_volumes.values()):
logger.debug("Removing cinder volume %s ..." % volume_id)
if os_utils.delete_volume(cinder_client, volume_id):
logger.debug(" > Done!")
else:
logger.debug("Trying forced removal...")
if os_utils.delete_volume(cinder_client,
volume_id,
forced=True):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"volume %s..." % volume_id)
else:
logger.debug(" > this is a default volume and will "
"NOT be deleted.")
def remove_floatingips(neutron_client, default_floatingips):
logger.debug("Removing floating IPs...")
floatingips = os_utils.get_floating_ips(neutron_client)
if floatingips is None or len(floatingips) == 0:
logger.debug("No floating IPs found.")
return
init_len = len(floatingips)
deleted = 0
for fip in floatingips:
fip_id = fip['id']
fip_ip = fip['floating_ip_address']
logger.debug("'%s', ID=%s " % (fip_ip, fip_id))
if (fip_id not in default_floatingips and
fip_ip not in default_floatingips.values()):
logger.debug("Removing floating IP %s ..." % fip_id)
if os_utils.delete_floating_ip(neutron_client, fip_id):
logger.debug(" > Done!")
deleted += 1
else:
logger.error("There has been a problem removing the "
"floating IP %s..." % fip_id)
else:
logger.debug(" > this is a default floating IP and will "
"NOT be deleted.")
timeout = 50
while timeout > 0:
floatingips = os_utils.get_floating_ips(neutron_client)
if floatingips is None or len(floatingips) == (init_len - deleted):
break
else:
logger.debug("Waiting for floating ips to be released...")
timeout -= 1
time.sleep(1)
def remove_networks(neutron_client, default_networks, default_routers):
logger.debug("Removing Neutron objects")
network_ids = []
networks = os_utils.get_network_list(neutron_client)
if networks is None:
logger.debug("There are no networks in the deployment. ")
else:
logger.debug("Existing networks:")
for network in networks:
net_id = network['id']
net_name = network['name']
logger.debug(" '%s', ID=%s " % (net_name, net_id))
if (net_id in default_networks and
net_name in default_networks.values()):
logger.debug(" > this is a default network and will "
"NOT be deleted.")
elif network['router:external'] is True:
logger.debug(" > this is an external network and will "
"NOT be deleted.")
else:
logger.debug(" > this network will be deleted.")
network_ids.append(net_id)
# delete ports
ports = os_utils.get_port_list(neutron_client)
if ports is None:
logger.debug("There are no ports in the deployment. ")
else:
remove_ports(neutron_client, ports, network_ids)
# remove routers
routers = os_utils.get_router_list(neutron_client)
if routers is None:
logger.debug("There are no routers in the deployment. ")
else:
remove_routers(neutron_client, routers, default_routers)
# trozet: wait for Neutron to auto-cleanup HA networks when HA router is
# deleted
time.sleep(5)
# remove networks
if network_ids is not None:
for net_id in network_ids:
networks = os_utils.get_network_list(neutron_client)
if networks is None:
logger.debug("No networks left to remove")
break
elif not any(network['id'] == net_id for network in networks):
logger.debug("Network %s has already been removed" % net_id)
continue
logger.debug("Removing network %s ..." % net_id)
if os_utils.delete_neutron_net(neutron_client, net_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"network %s..." % net_id)
def remove_ports(neutron_client, ports, network_ids):
for port in ports:
if port['network_id'] in network_ids:
port_id = port['id']
            try:
                subnet_id = port['fixed_ips'][0]['subnet_id']
            except (KeyError, IndexError):
                subnet_id = None
                logger.debug(" > WARNING: Port %s does not contain fixed_ips"
                             % port_id)
                logger.info(port)
router_id = port['device_id']
if len(port['fixed_ips']) == 0 and router_id == '':
logger.debug("Removing port %s ..." % port_id)
if (os_utils.delete_neutron_port(neutron_client, port_id)):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"port %s ..." % port_id)
force_remove_port(neutron_client, port_id)
elif port['device_owner'] == 'network:router_interface':
logger.debug("Detaching port %s (subnet %s) from router %s ..."
% (port_id, subnet_id, router_id))
if os_utils.remove_interface_router(
neutron_client, router_id, subnet_id):
time.sleep(5) # leave 5 seconds to detach
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"interface %s from router %s..."
% (subnet_id, router_id))
force_remove_port(neutron_client, port_id)
else:
force_remove_port(neutron_client, port_id)
def force_remove_port(neutron_client, port_id):
logger.debug("Clearing device_owner for port %s ..." % port_id)
os_utils.update_neutron_port(neutron_client, port_id,
device_owner='clear')
logger.debug("Removing port %s ..." % port_id)
if os_utils.delete_neutron_port(neutron_client, port_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the port %s..."
% port_id)
def remove_routers(neutron_client, routers, default_routers):
for router in routers:
router_id = router['id']
router_name = router['name']
if (router_id not in default_routers and
router_name not in default_routers.values()):
logger.debug("Checking '%s' with ID=(%s) ..." % (router_name,
router_id))
if router['external_gateway_info'] is not None:
logger.debug("Router has gateway to external network."
"Removing link...")
if os_utils.remove_gateway_router(neutron_client, router_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing "
"the gateway...")
else:
logger.debug("Router is not connected to anything."
"Ready to remove...")
logger.debug("Removing router %s(%s) ..."
% (router_name, router_id))
if os_utils.delete_neutron_router(neutron_client, router_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"router '%s'(%s)..." % (router_name, router_id))
def remove_security_groups(neutron_client, default_security_groups):
logger.debug("Removing Security groups...")
secgroups = os_utils.get_security_groups(neutron_client)
if secgroups is None or len(secgroups) == 0:
logger.debug("No security groups found.")
return
for secgroup in secgroups:
secgroup_name = secgroup['name']
secgroup_id = secgroup['id']
logger.debug("'%s', ID=%s " % (secgroup_name, secgroup_id))
if secgroup_id not in default_security_groups:
logger.debug(" Removing '%s'..." % secgroup_name)
if os_utils.delete_security_group(neutron_client, secgroup_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"security group %s..." % secgroup_id)
else:
logger.debug(" > this is a default security group and will NOT "
"be deleted.")
def remove_users(keystone_client, default_users):
logger.debug("Removing Users...")
users = os_utils.get_users(keystone_client)
if users is None:
logger.debug("There are no users in the deployment. ")
return
for user in users:
user_name = getattr(user, 'name')
user_id = getattr(user, 'id')
logger.debug("'%s', ID=%s " % (user_name, user_id))
if (user_id not in default_users and
user_name not in default_users.values()):
logger.debug(" Removing '%s'..." % user_name)
if os_utils.delete_user(keystone_client, user_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"user '%s'(%s)..." % (user_name, user_id))
else:
logger.debug(" > this is a default user and will "
"NOT be deleted.")
def remove_tenants(keystone_client, default_tenants):
logger.debug("Removing Tenants...")
tenants = os_utils.get_tenants(keystone_client)
if tenants is None:
logger.debug("There are no tenants in the deployment. ")
return
for tenant in tenants:
tenant_name = getattr(tenant, 'name')
tenant_id = getattr(tenant, 'id')
logger.debug("'%s', ID=%s " % (tenant_name, tenant_id))
if (tenant_id not in default_tenants and
tenant_name not in default_tenants.values()):
logger.debug(" Removing '%s'..." % tenant_name)
if os_utils.delete_tenant(keystone_client, tenant_id):
logger.debug(" > Done!")
else:
logger.error("There has been a problem removing the "
"tenant '%s'(%s)..." % (tenant_name, tenant_id))
else:
logger.debug(" > this is a default tenant and will "
"NOT be deleted.")
def main():
logging.basicConfig()
logger.info("Cleaning OpenStack resources...")
nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
keystone_client = os_utils.get_keystone_client()
cinder_client = os_utils.get_cinder_client()
glance_client = os_utils.get_glance_client()
try:
with open(OS_SNAPSHOT_FILE) as f:
snapshot_yaml = yaml.safe_load(f)
except Exception:
logger.info("The file %s does not exist. The OpenStack snapshot must"
" be created first. Aborting cleanup." % OS_SNAPSHOT_FILE)
return 0
default_images = snapshot_yaml.get('images')
default_instances = snapshot_yaml.get('instances')
default_volumes = snapshot_yaml.get('volumes')
default_networks = snapshot_yaml.get('networks')
default_routers = snapshot_yaml.get('routers')
default_security_groups = snapshot_yaml.get('secgroups')
default_floatingips = snapshot_yaml.get('floatingips')
default_users = snapshot_yaml.get('users')
default_tenants = snapshot_yaml.get('tenants')
if not os_utils.check_credentials():
logger.error("Please source the openrc credentials and run "
"the script again.")
return -1
remove_instances(nova_client, default_instances)
separator()
remove_images(glance_client, default_images)
separator()
remove_volumes(cinder_client, default_volumes)
separator()
remove_floatingips(neutron_client, default_floatingips)
separator()
remove_networks(neutron_client, default_networks, default_routers)
separator()
remove_security_groups(neutron_client, default_security_groups)
separator()
remove_users(keystone_client, default_users)
separator()
remove_tenants(keystone_client, default_tenants)
separator()
return 0
| 39.817552 | 79 | 0.579723 |
574942eb0b7b73017fa1738b1f0ae7cfa0c9943a
| 7,635 |
py
|
Python
|
Exerc01.py
|
Numbess/PO-2019-IFCE-Prof.Ronaldo-CT0101
|
4be0bbfedbd4369a6608c994d0a53b45f9b870e1
|
[
"Apache-2.0"
] | null | null | null |
Exerc01.py
|
Numbess/PO-2019-IFCE-Prof.Ronaldo-CT0101
|
4be0bbfedbd4369a6608c994d0a53b45f9b870e1
|
[
"Apache-2.0"
] | null | null | null |
Exerc01.py
|
Numbess/PO-2019-IFCE-Prof.Ronaldo-CT0101
|
4be0bbfedbd4369a6608c994d0a53b45f9b870e1
|
[
"Apache-2.0"
] | null | null | null |
#REQUESTED ASSIGNMENT: (Prof. Ronaldo)##################################################################
#CT0101 - First contribution of the first stage                                                        #
#Implements the bubble sort algorithm and plots the following graphs:                                  #
#                                                                                                      #
#   *Size of the number list x Time to sort with this method                                           #
#   *Size of the list x Number of operations (swaps)                                                   #
#                                                                                                      #
#The generated lists must hold random numbers of the following sizes: 10000, 20000, 50000, 100000.     #
########################################################################################################
#"Importação das devidas bibliotecas ;)"
import timeit
from random import randint
import matplotlib as mpl
import matplotlib.pyplot as plt
###############################################################################{
#"Declarações iniciais..."
mpl.use('Agg')
plt.style.use('ggplot')
mpl.rc('lines', linewidth=2)
#plt.style.use('dark_background') #<-Makes the background dark! (optional)
###############################################################################}
#"Função geradora de lista, sendo o comprimento desta definido como parâmetro de entrada, de elementos aleatórios"
#Implementação do professor#################################################
def geraLista(tam): #
lista = [] #
for i in range(tam): #
n = randint(1,1*tam) #
if n not in lista: lista.append(n) #
return lista #
############################################################################
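# Note: because duplicate draws are skipped, the generated list may be shorter
# than `tam`; the values are drawn uniformly from [1, tam].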
#"Função responsável pela criação de gráfico(x,y) para estudo do desempenho de algoritmos"
#Implementação do professor##############################################################################
def desenhaGrafico(x,y, file_name, label, file_title, line_color,xl = "<Entradas/>", yl = "<Saídas/>"): #
fig = plt.figure(figsize=(10, 8)) #
ax = fig.add_subplot(111) #
ax.plot(x,y, color=line_color,label = label) #
plt.stem(x, y, linefmt='b:',markerfmt='C3o',use_line_collection=True) #
ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure) #
plt.ylabel(yl) #
plt.xlabel(xl) #
plt.title(file_title) #
fig.savefig(file_name) #
#########################################################################################################
#"Função Bubble Sort(Sem o uso da variável 'flag')"
#Implementação do aluno#####################################################
def bubbleSort(lista): #
#
num_iteracoes = 0 #
#
for i in range(0, len(lista)-1): #
#
for j in range(0, len(lista)-1-i): #
#
if lista[ j ] > lista[j + 1]: #
#
lista[j], lista[j + 1] = lista[j + 1], lista[j] #
num_iteracoes += 1 #
#
return num_iteracoes #
############################################################################
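# Quick sanity check (illustrative): bubbleSort([3, 2, 1]) performs 3 swaps,
# returns 3 and leaves the list sorted in place as [1, 2, 3].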
#"Função que ordena um número determinado(em função da entrada) de valores gerados aleatoriamente e retorna os devidos gráficos comparativos"
#Implementação do aluno##################################################################################################################################
def cria_Graficos(lista_entrada): #
#
tempos_orden = list() #
numer_iteracoes = list() #
#
for i in lista_entrada: #
#
lista = geraLista(i) #
tempos_orden.append(timeit.timeit("bubbleSort({})".format(lista),setup="from __main__ import bubbleSort",number=1)) #
numer_iteracoes.append(bubbleSort(lista)) #
#
desenhaGrafico(lista_entrada,tempos_orden, "Grafico(Tamanho_Lista-X-Tempo_Ordenacoes).png", "Tempo",'Tamanho_Lista X Tempo_Ordenacoes','yellow') # #
desenhaGrafico(lista_entrada, numer_iteracoes, "Grafico(Tamanho-X-Numero_Iteracoes).png", "Numero_Iteracao",'Tamanho_Lista X Numero_Iteracoes','lime')#
#########################################################################################################################################################
#Application start-up:
#########################################
lista_teste = [10000,20000,50000,100000]#
cria_Graficos(lista_teste) #
#########################################
#############################
################
| 81.223404 | 173 | 0.26706 |
b99a529f2ec283eefce459ffb904f148642b5e3d
| 81 |
py
|
Python
|
shell/Outputs/header.py
|
vasco2016/shellsploit-framework
|
04eb4a0449acaba0b70c40a78c61a0d5e2527406
|
[
"MIT"
] | 61 |
2017-06-13T13:48:38.000Z
|
2022-03-02T17:43:45.000Z
|
shell/Outputs/header.py
|
T0mcat3r/shellsploit-framework
|
04eb4a0449acaba0b70c40a78c61a0d5e2527406
|
[
"MIT"
] | null | null | null |
shell/Outputs/header.py
|
T0mcat3r/shellsploit-framework
|
04eb4a0449acaba0b70c40a78c61a0d5e2527406
|
[
"MIT"
] | 28 |
2017-08-15T05:38:27.000Z
|
2020-12-31T03:39:38.000Z
|
from time import strftime
from .logger import logs
from binascii import unhexlify
| 27 | 30 | 0.851852 |
594aa0437688437b03d9337b91563de102fd9a2a
| 10,091 |
py
|
Python
|
tests/unit/test_module_names.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_module_names.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_module_names.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
tests.unit.test_test_module_name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Python libs
from __future__ import absolute_import
import fnmatch
import os
# Import Salt libs
import salt.utils.path
import salt.utils.stringutils
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.paths import CODE_DIR, list_test_mods
EXCLUDED_DIRS = [
os.path.join('tests', 'pkg'),
os.path.join('tests', 'perf'),
os.path.join('tests', 'support'),
os.path.join('tests', 'unit', 'utils', 'cache_mods'),
os.path.join('tests', 'unit', 'modules', 'inspectlib'),
os.path.join('tests', 'unit', 'modules', 'zypp'),
os.path.join('tests', 'unit', 'templates', 'files'),
os.path.join('tests', 'integration', 'files'),
os.path.join('tests', 'unit', 'files'),
os.path.join('tests', 'integration', 'cloud', 'helpers'),
os.path.join('tests', 'kitchen', 'tests'),
]
INCLUDED_DIRS = [
os.path.join('tests', 'kitchen', 'tests', '*', 'tests', '*'),
]
EXCLUDED_FILES = [
os.path.join('tests', 'eventlisten.py'),
os.path.join('tests', 'buildpackage.py'),
os.path.join('tests', 'saltsh.py'),
os.path.join('tests', 'minionswarm.py'),
os.path.join('tests', 'wheeltest.py'),
os.path.join('tests', 'runtests.py'),
os.path.join('tests', 'jenkins.py'),
os.path.join('tests', 'salt-tcpdump.py'),
os.path.join('tests', 'conftest.py'),
os.path.join('tests', 'packdump.py'),
os.path.join('tests', 'consist.py'),
os.path.join('tests', 'modparser.py'),
os.path.join('tests', 'virtualname.py'),
os.path.join('tests', 'committer_parser.py'),
os.path.join('tests', 'zypp_plugin.py'),
os.path.join('tests', 'tox-helper.py'),
os.path.join('tests', 'unit', 'transport', 'mixins.py'),
os.path.join('tests', 'integration', 'utils', 'testprogram.py'),
]
class BadTestModuleNamesTestCase(TestCase):
'''
Unit test case for testing bad names for test modules
'''
maxDiff = None
def _match_dirs(self, reldir, matchdirs):
return any(fnmatch.fnmatchcase(reldir, mdir) for mdir in matchdirs)
def test_module_name(self):
'''
Make sure all test modules conform to the test_*.py naming scheme
'''
excluded_dirs, included_dirs = tuple(EXCLUDED_DIRS), tuple(INCLUDED_DIRS)
tests_dir = os.path.join(CODE_DIR, 'tests')
bad_names = []
for root, _, files in salt.utils.path.os_walk(tests_dir):
reldir = os.path.relpath(root, CODE_DIR)
if (reldir.startswith(excluded_dirs) and not self._match_dirs(reldir, included_dirs)) \
or reldir.endswith('__pycache__'):
continue
for fname in files:
if fname == '__init__.py' or not fname.endswith('.py'):
continue
relpath = os.path.join(reldir, fname)
if relpath in EXCLUDED_FILES:
continue
if not fname.startswith('test_'):
bad_names.append(relpath)
error_msg = '\n\nPlease rename the following files:\n'
for path in bad_names:
directory, filename = path.rsplit(os.sep, 1)
filename, _ = os.path.splitext(filename)
error_msg += ' {} -> {}/test_{}.py\n'.format(path, directory, filename.split('_test')[0])
error_msg += '\nIf you believe one of the entries above should be ignored, please add it to either\n'
error_msg += '\'EXCLUDED_DIRS\' or \'EXCLUDED_FILES\' in \'tests/unit/test_module_names.py\'.\n'
error_msg += 'If it is a tests module, then please rename as suggested.'
self.assertEqual([], bad_names, error_msg)
def test_module_name_source_match(self):
'''
Check all the test mods and check if they correspond to actual files in
the codebase. If this test fails, then a test module is likely not
named correctly, and should be adjusted.
If a test module doesn't have a natural name match (as does this very
        file), then it should be included in the "ignore" tuple below.
However, if there is no matching source code file, then you should
consider mapping it to files manually via tests/filename_map.yml.
'''
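        # Mapping example (illustrative): the test module
        #   unit.modules.test_foo
        # is expected to correspond to the source file
        #   salt/modules/foo.py
        # (or salt/modules/foo/__init__.py for a package).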
ignore = (
'unit.test_doc',
'unit.test_mock',
'unit.test_module_names',
'unit.test_virtualname',
'unit.test_simple',
'unit.test_zypp_plugins',
'unit.test_proxy_minion',
'unit.cache.test_cache',
'unit.serializers.test_serializers',
'unit.states.test_postgres',
'integration.cli.test_custom_module',
'integration.cli.test_grains',
'integration.client.test_kwarg',
'integration.client.test_runner',
'integration.client.test_standard',
'integration.client.test_syndic',
'integration.cloud.test_cloud',
'integration.doc.test_man',
'integration.externalapi.test_venafiapi',
'integration.grains.test_custom',
'integration.loader.test_ext_grains',
'integration.loader.test_ext_modules',
'integration.logging.test_jid_logging',
'integration.master.test_event_return',
'integration.minion.test_blackout',
'integration.minion.test_pillar',
'integration.minion.test_executor',
'integration.minion.test_timeout',
'integration.modules.test_decorators',
'integration.modules.test_pkg',
'integration.modules.test_state_jinja_filters',
'integration.modules.test_sysctl',
'integration.netapi.test_client',
'integration.netapi.rest_tornado.test_app',
'integration.netapi.rest_cherrypy.test_app_pam',
'integration.output.test_output',
'integration.pillar.test_pillar_include',
'integration.proxy.test_shell',
'integration.proxy.test_simple',
'integration.reactor.test_reactor',
'integration.returners.test_noop_return',
'integration.runners.test_runner_returns',
'integration.scheduler.test_error',
'integration.scheduler.test_eval',
'integration.scheduler.test_postpone',
'integration.scheduler.test_skip',
'integration.scheduler.test_maxrunning',
'integration.scheduler.test_helpers',
'integration.scheduler.test_run_job',
'integration.shell.test_spm',
'integration.shell.test_cp',
'integration.shell.test_syndic',
'integration.shell.test_proxy',
'integration.shell.test_auth',
'integration.shell.test_call',
'integration.shell.test_arguments',
'integration.shell.test_matcher',
'integration.shell.test_master_tops',
'integration.shell.test_saltcli',
'integration.shell.test_master',
'integration.shell.test_key',
'integration.shell.test_runner',
'integration.shell.test_cloud',
'integration.shell.test_enabled',
'integration.shell.test_minion',
'integration.spm.test_build',
'integration.spm.test_files',
'integration.spm.test_info',
'integration.spm.test_install',
'integration.spm.test_remove',
'integration.spm.test_repo',
'integration.ssh.test_deploy',
'integration.ssh.test_grains',
'integration.ssh.test_jinja_filters',
'integration.ssh.test_master',
'integration.ssh.test_mine',
'integration.ssh.test_pillar',
'integration.ssh.test_raw',
'integration.ssh.test_state',
'integration.states.test_compiler',
'integration.states.test_handle_error',
'integration.states.test_handle_iorder',
'integration.states.test_match',
'integration.states.test_renderers',
'integration.wheel.test_client',
'multimaster.minion.test_event',
)
errors = []
def _format_errors(errors):
msg = (
'The following {0} test module(s) could not be matched to a '
'source code file:\n\n'.format(len(errors))
)
msg += ''.join(errors)
return msg
for mod_name in list_test_mods():
if mod_name in ignore:
# Test module is being ignored, skip it
continue
# Separate the test_foo away from the rest of the mod name, because
# we'll need to remove the "test_" from the beginning and add .py
stem, flower = mod_name.rsplit('.', 1)
# Lop off the integration/unit from the beginning of the mod name
try:
stem = stem.split('.', 1)[1]
except IndexError:
# This test mod was in the root of the unit/integration dir
stem = ''
# The path from the root of the repo
relpath = salt.utils.path.join(
'salt',
stem.replace('.', os.sep),
'.'.join((flower[5:], 'py')))
# The full path to the file we expect to find
abspath = salt.utils.path.join(CODE_DIR, relpath)
if not os.path.isfile(abspath):
# Maybe this is in a dunder init?
alt_relpath = salt.utils.path.join(relpath[:-3], '__init__.py')
alt_abspath = salt.utils.path.join(abspath[:-3], '__init__.py')
if os.path.isfile(alt_abspath):
# Yep, it is. Carry on!
continue
errors.append(
'{0} (expected: {1})\n'.format(mod_name, relpath)
)
assert not errors, _format_errors(errors)
| 41.020325 | 109 | 0.591319 |
95810a8a6d79ab1e78dd1756556e382ad21b677a
| 1,502 |
py
|
Python
|
main.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | 1 |
2021-01-12T19:12:06.000Z
|
2021-01-12T19:12:06.000Z
|
main.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
main.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
'''
Multiclass classifier based on Mahalanobis distance (https://en.wikipedia.org/wiki/Mahalanobis_distance)
Vasilios Mavroudis, 2019
Generalized version of the binary classifier shown here: https://www.machinelearningplus.com/statistics/mahalanobis-distance/
'''
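# The underlying metric (from the linked Wikipedia page): for a sample x and a
# class with mean vector mu and covariance matrix S,
#     D_M(x) = sqrt( (x - mu)^T  S^{-1}  (x - mu) )
# The classifier presumably assigns each sample to the class whose mean is
# nearest under this distance.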
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from classifiers import MahalanobisClassifier
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, confusion_matrix
# Load and preprocess data
df = pd.read_csv('https://raw.githubusercontent.com/jbrownlee/Datasets/master/wheat-seeds.csv', header=None)
#df.dropna(inplace=True) # Drop missing values.
# Split data
samples = df.iloc[:,0:7]
labels = df.iloc[:,7]
unique_labels = np.unique(labels) #To return the correct labels when predicting
samples, new_samples, labels, new_labels = train_test_split(samples, labels, test_size=0.5, random_state=100)
# "Training"
clf = MahalanobisClassifier(samples, labels)
# Predicting
pred_probs = clf.predict_probability(new_samples)
pred_class = clf.predict_class(new_samples,unique_labels)
pred_actuals = pd.DataFrame([(pred, act) for pred, act in zip(pred_class, new_labels)], columns=['pred', 'true'])
#print(pred_actuals[:25])
truth = pred_actuals.loc[:, 'true']
pred = pred_actuals.loc[:, 'pred']
scores = np.array(pred_probs)[:, 1]
print('\nAccuracy Score: ', accuracy_score(truth, pred))
print('\nClassification Report: \n', classification_report(truth, pred))
| 36.634146 | 125 | 0.783622 |
c8249209a6e2e7486b12dd2df96acb9220199e8d
| 436 |
py
|
Python
|
HLTrigger/Configuration/python/HLT_75e33/modules/hlt3PFPuppiCentralJet45MaxEta2p4_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1 |
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hlt3PFPuppiCentralJet45MaxEta2p4_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4 |
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hlt3PFPuppiCentralJet45MaxEta2p4_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1 |
2021-11-30T16:16:05.000Z
|
2021-11-30T16:16:05.000Z
|
import FWCore.ParameterSet.Config as cms
hlt3PFPuppiCentralJet45MaxEta2p4 = cms.EDFilter("HLT1PFJet",
MaxEta = cms.double(2.4),
MaxMass = cms.double(-1.0),
MinE = cms.double(-1.0),
MinEta = cms.double(-2.4),
MinMass = cms.double(-1.0),
MinN = cms.int32(3),
MinPt = cms.double(45.0),
inputTag = cms.InputTag("hltAK4PFPuppiJetsCorrected"),
saveTags = cms.bool(True),
triggerType = cms.int32(86)
)
| 29.066667 | 60 | 0.658257 |
b3788fe070f5e4376ef9a185eb0a476706765258
| 8,203 |
py
|
Python
|
DQM/SiPixelCommon/test/client_template_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQM/SiPixelCommon/test/client_template_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQM/SiPixelCommon/test/client_template_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
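# NOTE (an assumption based on the _template_ file name): tokens such as GTAG,
# CALIB, FILENAME, ONEPARAM/TWOPARAM and the ...SPOT markers below are
# placeholders, presumably substituted by a driver script before this
# configuration is handed to cmsRun.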
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
process = cms.Process("SIPIXELDQM")
# load all appropriate modules:
# get alignment conditions needed for geometry:
process.load("Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff")
process.load("Geometry.TrackerSimData.trackerSimGeometryXML_cfi")
process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi")
process.load("RecoLocalTracker.SiPixelRecHits.PixelCPEESProducers_cff")
process.load("RecoLocalTracker.SiPixelRecHits.SiPixelRecHits_cfi")
process.load("RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi")
process.load("DQM.SiPixelMonitorRawData.SiPixelMonitorRawData_cfi")
process.load("DQM.SiPixelMonitorDigi.SiPixelMonitorDigi_cfi")
process.load("DQM.SiPixelMonitorCluster.SiPixelMonitorCluster_cfi")
process.load("DQM.SiPixelMonitorRecHit.SiPixelMonitorRecHit_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("CalibTracker.SiPixelTools.SiPixelErrorsCalibDigis_cfi")
process.load("CalibTracker.SiPixelGainCalibration.SiPixelCalibDigiProducer_cfi")
process.load("CalibTracker.SiPixelSCurveCalibration.SiPixelSCurveCalibrationAnalysis_cfi")
process.load("CalibTracker.SiPixelIsAliveCalibration.SiPixelIsAliveCalibration_cfi")
process.load("CalibTracker.SiPixelGainCalibration.SiPixelGainCalibrationAnalysis_cfi")
# get the global tag with all cabling maps, alignment info, etc.
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_noesprefer_cff")
process.GlobalTag.connect = "frontier://FrontierProd/CMS_COND_21X_GLOBALTAG"
process.GlobalTag.globaltag = "GTAG"
# and access the calibration information:
CALIBfrom CondCore.DBCommon.CondDBCommon_cfi import *
#
CALIBprocess.siPixelCalibGlobalTag = cms.ESSource("PoolDBESSource",
CALIB CondDBCommon,
CALIB connect = cms.string("oracle://cms_orcoff_prep/CMS_COND_PIXEL_COMM_21X"),
CALIB globaltag = cms.string("PIXELCALIB_01::TypeGLOBALCALIB"),
CALIB BlobStreamerName = cms.untracked.string("TBufferBlobStreamingService")
CALIB )
CALIBprocess.siPixelCalibGlobalTag.DBParameters.authenticationPath = "/afs/cern.ch/cms/DB/conddb"
process.esprefer_dbcalib = cms.ESPrefer("PoolDBESSource","GlobalTag")
# this is needed by the gain calibration analyzer
process.load("CondTools.SiPixel.SiPixelGainCalibrationService_cfi")
# the input file source
process.source = cms.Source("PoolSource",
debugFlag = cms.untracked.bool(True),
debugVebosity = cms.untracked.uint32(1),
ONEPARAM
TWOPARAM
fileNames = cms.untracked.vstring('FILENAME')
)
process.maxEvents = cms.untracked.PSet(
# input = cms.untracked.int32(-1)
input = cms.untracked.int32(-1)
)
# message logger
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('siPixelDigis',
'SiPixelRawDataErrorSource',
'SiPixelCalibProducer',
'SiPixelDigiSource',
'SiPixelClusterSource',
'SiPixelRecHitSource',
'sipixelEDAClient'),
TEXTFILE = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')
),
# destinations = cms.untracked.vstring('TEXTFILE')
)
process.AdaptorConfig = cms.Service("AdaptorConfig")
# DQM modules:
process.sipixelEDAClient = DQMEDHarvester("SiPixelEDAClient",
FileSaveFrequency = cms.untracked.int32(50),
StaticUpdateFrequency = cms.untracked.int32(10)
)
from DQMServices.Core.DQMQualityTester import DQMQualityTester
process.qTester = DQMQualityTester(
qtList = cms.untracked.FileInPath('DQM/SiPixelMonitorClient/test/sipixel_qualitytest_config.xml'),
QualityTestPrescaler = cms.untracked.int32(1),
getQualityTestsFromFile = cms.untracked.bool(True)
)
process.ModuleWebRegistry = cms.Service("ModuleWebRegistry")
# define all paths and sequences:
process.Digis = cms.Sequence(process.siPixelDigis)
process.Clusters = cms.Sequence(process.siPixelClusters)
process.Calibration = cms.Sequence(process.siPixelCalibDigis*process.siPixelErrorsDigisToCalibDigis*process.siPixelGainCalibrationAnalysis*process.siPixelIsAliveCalibration*process.siPixelSCurveAnalysis)
process.RAWmonitor = cms.Sequence(process.SiPixelRawDataErrorSource)
process.DIGImonitor = cms.Sequence(process.SiPixelDigiSource)
process.CLUmonitor = cms.Sequence(process.SiPixelClusterSource)
process.HITmonitor = cms.Sequence(process.SiPixelRecHitSource)
process.DQMmodules = cms.Sequence(process.qTester*process.dqmEnv*process.dqmSaver)
process.p = cms.Path(process.DQMmodules*DIGISPOTCLUSPOTRECSPOTCDSPOTSCURVESPOTGAINSPOTPIXELSPOTRAWMONSPOTDIGMONSPOTCLUMONSPOTRECMONSPOTprocess.sipixelEDAClient)
# choose one of these two:
# online-style DQM (runs RECO)
# offline-style DQM (reco in input file)
#process.p = cms.Path(process.DQMmodules*process.DIGImonitor*process.sipixelEDAClient)
# list of replace statements
process.siPixelDigis.InputLabel = 'source'
process.siPixelDigis.IncludeErrors = True
process.SiPixelRawDataErrorSource.saveFile = False
process.SiPixelRawDataErrorSource.isPIB = False
process.SiPixelRawDataErrorSource.slowDown = False
process.SiPixelDigiSource.saveFile = False
process.SiPixelDigiSource.isPIB = False
process.SiPixelDigiSource.slowDown = False
process.SiPixelDigiSource.modOn = True
process.SiPixelDigiSource.ladOn = False
process.SiPixelDigiSource.layOn = False
process.SiPixelDigiSource.phiOn = False
process.SiPixelDigiSource.bladeOn = False
process.SiPixelDigiSource.diskOn = False
process.SiPixelDigiSource.ringOn = False
process.SiPixelClusterSource.saveFile = False
process.SiPixelClusterSource.modOn = True
process.SiPixelClusterSource.ladOn = False
process.SiPixelClusterSource.layOn = False
process.SiPixelClusterSource.phiOn = False
process.SiPixelClusterSource.bladeOn = False
process.SiPixelClusterSource.diskOn = False
process.SiPixelClusterSource.ringOn = False
process.SiPixelRecHitSource.saveFile = False
process.SiPixelRecHitSource.modOn = True
process.SiPixelRecHitSource.ladOn = False
process.SiPixelRecHitSource.layOn = False
process.SiPixelRecHitSource.phiOn = False
process.SiPixelRecHitSource.bladeOn = False
process.SiPixelRecHitSource.ringOn = False
process.SiPixelRecHitSource.diskOn = False
process.DQM.collectorHost = ''
process.dqmSaver.convention = 'Online'
process.dqmSaver.producer = 'DQM'
process.dqmEnv.subSystemFolder = 'Pixel'
process.dqmSaver.dirName = '.'
process.dqmSaver.saveByLumiSection = -1
process.dqmSaver.saveByRun = 1
process.dqmSaver.saveAtJobEnd = True
process.siPixelIsAliveCalibration.DetSetVectorSiPixelCalibDigiTag = 'siPixelCalibDigis'
process.siPixelSCurveAnalysis.DetSetVectorSiPixelCalibDigiTag = 'siPixelCalibDigis'
process.siPixelGainCalibrationAnalysis.DetSetVectorSiPixelCalibDigiTag = 'siPixelCalibDigis'
process.siPixelErrorsDigisToCalibDigis.SiPixelProducerLabelTag = 'siPixelCalibDigis'
process.siPixelIsAliveCalibration.saveFile = False
process.siPixelGainCalibrationAnalysis.saveFile = False
process.siPixelSCurveAnalysis.saveFile = False
process.siPixelErrorsDigisToCalibDigis.saveFile=False
| 50.325153 | 203 | 0.739851 |
f4b17047a8676107c9df6468353b956832a909c9
| 6,948 |
py
|
Python
|
davisputnam/tests/test_parse.py
|
Bram-Hub/DP-Visuals
|
43d9c7f17bbe7b54b8528ebcbcce421727103e7c
|
[
"MIT"
] | null | null | null |
davisputnam/tests/test_parse.py
|
Bram-Hub/DP-Visuals
|
43d9c7f17bbe7b54b8528ebcbcce421727103e7c
|
[
"MIT"
] | 6 |
2016-03-18T03:10:46.000Z
|
2016-05-03T15:05:02.000Z
|
davisputnam/tests/test_parse.py
|
Bram-Hub/DP-Visuals
|
43d9c7f17bbe7b54b8528ebcbcce421727103e7c
|
[
"MIT"
] | 1 |
2019-01-15T01:17:49.000Z
|
2019-01-15T01:17:49.000Z
|
import unittest
from parse import parse
class TestParsing(unittest.TestCase):
def test_no_match(self):
vals = []
vals.append(parse("a"))
vals.append(parse("AB"))
vals.append(parse("A B"))
for val in vals:
self.assertIsNone(val, msg="Invalid pattern Parsed!")
def test_literal(self):
statement = parse("A")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "lit")
def test_negation(self):
statement = parse("~A")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "~")
self.assertEqual(statement.value1().type, "lit")
statement = parse("~(A)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "~")
self.assertEqual(statement.value1().type, "lit")
def test_disjunction(self):
statement = parse("AvB")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "v")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "(AvB)")
statement = parse("Av(BvC)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "v")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "v")
self.assertEqual(statement.__str__(), "(Av(BvC))")
statement = parse("(AvB)vC")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "v")
self.assertEqual(statement.value1().type, "v")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "((AvB)vC)")
statement = parse("(AvB)v(CvD)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "v")
self.assertEqual(statement.value1().type, "v")
self.assertEqual(statement.value2().type, "v")
self.assertEqual(statement.__str__(), "((AvB)v(CvD))")
statement = parse("Av(Bv(CvD))")
self.assertIsNotNone(statement)
self.assertEqual(statement.__str__(), "(Av(Bv(CvD)))")
def test_conjunction(self):
statement = parse("A^B")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "^")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "(A^B)")
statement = parse("A^(B^C)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "^")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "^")
self.assertEqual(statement.__str__(), "(A^(B^C))")
statement = parse("(A^B)^C")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "^")
self.assertEqual(statement.value1().type, "^")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "((A^B)^C)")
statement = parse("(A^B)^(C^D)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "^")
self.assertEqual(statement.value1().type, "^")
self.assertEqual(statement.value2().type, "^")
self.assertEqual(statement.__str__(), "((A^B)^(C^D))")
statement = parse("A^(B^(C^D))")
self.assertIsNotNone(statement)
self.assertEqual(statement.__str__(), "(A^(B^(C^D)))")
def test_implication(self):
statement = parse("A->B")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "->")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "(A->B)")
statement = parse("A->(B->C)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "->")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "->")
self.assertEqual(statement.__str__(), "(A->(B->C))")
statement = parse("(A->B)->C")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "->")
self.assertEqual(statement.value1().type, "->")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "((A->B)->C)")
statement = parse("(A->B)->(C->D)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "->")
self.assertEqual(statement.value1().type, "->")
self.assertEqual(statement.value2().type, "->")
self.assertEqual(statement.__str__(), "((A->B)->(C->D))")
statement = parse("A->(B->(C->D))")
self.assertIsNotNone(statement)
self.assertEqual(statement.__str__(), "(A->(B->(C->D)))")
def test_biconditional(self):
statement = parse("A<->B")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "<->")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "(A<->B)")
statement = parse("A<->(B<->C)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "<->")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "<->")
self.assertEqual(statement.__str__(), "(A<->(B<->C))")
statement = parse("(A<->B)<->C")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "<->")
self.assertEqual(statement.value1().type, "<->")
self.assertEqual(statement.value2().type, "lit")
self.assertEqual(statement.__str__(), "((A<->B)<->C)")
statement = parse("(A<->B)<->(C<->D)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "<->")
self.assertEqual(statement.value1().type, "<->")
self.assertEqual(statement.value2().type, "<->")
self.assertEqual(statement.__str__(), "((A<->B)<->(C<->D))")
statement = parse("A<->(B<->(C<->D))")
self.assertIsNotNone(statement)
self.assertEqual(statement.__str__(), "(A<->(B<->(C<->D)))")
def test_mixed(self):
statement = parse("Av(B^C)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "v")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "^")
statement = parse("A->(B<->C)")
self.assertIsNotNone(statement)
self.assertEqual(statement.type, "->")
self.assertEqual(statement.value1().type, "lit")
self.assertEqual(statement.value2().type, "<->")
if __name__ == '__main__':
print "Test parse():"
unittest.main()
| 38.6 | 68 | 0.600173 |
0b51ded3ef690eedeeb6c79073b94daaf1b13ff7
| 3,582 |
py
|
Python
|
tests/test_schema.py
|
dmsolow/fastavro
|
9912b310107964948a110a8f2f0779d55a1c8528
|
[
"MIT"
] | null | null | null |
tests/test_schema.py
|
dmsolow/fastavro
|
9912b310107964948a110a8f2f0779d55a1c8528
|
[
"MIT"
] | null | null | null |
tests/test_schema.py
|
dmsolow/fastavro
|
9912b310107964948a110a8f2f0779d55a1c8528
|
[
"MIT"
] | null | null | null |
import pytest
import fastavro
from fastavro.schema import SchemaParseException, UnknownType, parse_schema
pytestmark = pytest.mark.usefixtures("clean_schemas")
def test_named_types_have_names():
record_schema = {
"type": "record",
"fields": [{
"name": "field",
"type": "string",
}],
}
with pytest.raises(SchemaParseException):
fastavro.parse_schema(record_schema)
error_schema = {
"type": "error",
"fields": [{
"name": "field",
"type": "string",
}],
}
with pytest.raises(SchemaParseException):
fastavro.parse_schema(error_schema)
fixed_schema = {
"type": "fixed",
"size": 1,
}
with pytest.raises(SchemaParseException):
fastavro.parse_schema(fixed_schema)
enum_schema = {
"type": "enum",
"symbols": ["FOO"],
}
with pytest.raises(SchemaParseException):
fastavro.parse_schema(enum_schema)
# Should parse with name
for schema in (record_schema, error_schema, fixed_schema, enum_schema):
schema["name"] = "test_named_types_have_names"
fastavro.parse_schema(schema)
def test_parse_schema():
schema = {
"type": "record",
"name": "test_parse_schema",
"fields": [{
"name": "field",
"type": "string",
}],
}
parsed_schema = parse_schema(schema)
assert "__fastavro_parsed" in parsed_schema
parsed_schema_again = parse_schema(parsed_schema)
assert parsed_schema_again == parsed_schema
def test_unknown_type():
schema = {
"type": "unknown",
}
with pytest.raises(UnknownType):
parse_schema(schema)
def test_aliases_are_preserved():
schema = {
"type": "record",
"name": "test_parse_schema",
"fields": [{
"name": "field",
"type": "string",
"aliases": ["test"],
}],
}
parsed_schema = parse_schema(schema)
assert "aliases" in parsed_schema["fields"][0]
def test_aliases_is_a_list():
"""https://github.com/fastavro/fastavro/issues/206"""
schema = {
"type": "record",
"name": "test_parse_schema",
"fields": [{
"name": "field",
"type": "string",
"aliases": "foobar",
}],
}
with pytest.raises(SchemaParseException):
parse_schema(schema)
def test_scale_is_an_int():
"""https://github.com/fastavro/fastavro/issues/262"""
schema = {
"type": "record",
"name": "test_scale_is_an_int",
"fields": [{
"name": "field",
"type": {
"logicalType": "decimal",
"precision": 5,
"scale": "2",
"type": "bytes",
},
}],
}
with pytest.raises(SchemaParseException) as exc:
parse_schema(schema)
assert "decimal scale must be a postive integer" in str(exc)
def test_precision_is_an_int():
"""https://github.com/fastavro/fastavro/issues/262"""
schema = {
"type": "record",
"name": "test_scale_is_an_int",
"fields": [{
"name": "field",
"type": {
"logicalType": "decimal",
"precision": "5",
"scale": 2,
"type": "bytes",
},
}],
}
with pytest.raises(SchemaParseException) as exc:
parse_schema(schema)
assert "decimal precision must be a postive integer" in str(exc)
| 23.565789 | 75 | 0.545226 |
4bc44380df8d46602145d9e259f4593f268bb387
| 2,305 |
py
|
Python
|
evasdk/__init__.py
|
mkeegan/eva_python_sdk
|
8dbf4bce5931fe1333586d656554837b9367b0e4
|
[
"Apache-2.0"
] | null | null | null |
evasdk/__init__.py
|
mkeegan/eva_python_sdk
|
8dbf4bce5931fe1333586d656554837b9367b0e4
|
[
"Apache-2.0"
] | null | null | null |
evasdk/__init__.py
|
mkeegan/eva_python_sdk
|
8dbf4bce5931fe1333586d656554837b9367b0e4
|
[
"Apache-2.0"
] | null | null | null |
"""Eva Python SDK
This module provides convenient access to the Automata Eva API from applications written in Python 3.
## Examples
The Eva object allows you to directly control an Eva robot. It provides many useful helper functions for interacting with the robot.
### Eva
**Connecting**
```python
host = '<your_eva_IP_here>'
token = '<your_token_here>'
eva = Eva(host, token)
```
**GoTo movement**
```python
eva = Eva(host, token)
with eva.lock():
eva.control_wait_for_ready()
eva.control_go_to([0, 0, 0, 0, 0, 0], mode='teach')
```
**Toolpath create and run**
```python
toolpath = {
"metadata":{
"default_velocity":0.7,
"next_label_id":5,
"analog_modes":{ "i0":"voltage", "i1":"voltage", "o0":"voltage", "o1":"voltage" }
},
"waypoints":[
{ "joints":[-0.68147224, 0.3648368, -1.0703622, 9.354615e-05, -2.4358354, -0.6813218], "label_id":3 },
{ "joints":[-0.6350288, 0.25192022, -1.0664424, 0.030407501, -2.2955494, -0.615318], "label_id":2 },
{ "joints":[-0.13414459, 0.5361486, -1.280493, -6.992453e-08, -2.3972468, -0.13414553], "label_id":1 },
{ "joints":[-0.4103904, 0.33332264, -1.5417944, -5.380291e-06, -1.9328799, -0.41031334], "label_id":4 }
],
"timeline":[
{ "type":"home", "waypoint_id":2 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":1 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":0 },
{ "type":"trajectory", "trajectory":"joint_space", "waypoint_id":2 }
]
}
eva = Eva(host, token)
with eva.lock():
eva.control_wait_for_ready()
eva.toolpaths_use(toolpath)
eva.control_home()
eva.control_run(loop=1, mode='teach')
```
Please refer to the examples directory for more SDK usage examples.
### evasdk.eva_http and evasdk.eva_ws
These can be used to interact directly with the HTTP and Websocket APIs.
Useful when you don't want the managed websocket connection provided by the evasdk.Eva object.
"""
from .Eva import Eva
from .eva_http_client import EvaHTTPClient
from .eva_ws import ws_connect
from .robot_state import RobotState
from .helpers import strip_ip
from .eva_errors import (
EvaError,
EvaValidationError, EvaAuthError, EvaAutoRenewError,
EvaAdminError, EvaServerError)
| 29.177215 | 134 | 0.66551 |
159fc1c5e7c939a6489df7e5fa549f0d4c070da4
| 1,871 |
py
|
Python
|
pybpodgui_api/models/task/task_file.py
|
pybpod/pybpod-gui-api
|
a69ff79a4d9e4b6b99b618a0b373d7dc2927e156
|
[
"MIT"
] | null | null | null |
pybpodgui_api/models/task/task_file.py
|
pybpod/pybpod-gui-api
|
a69ff79a4d9e4b6b99b618a0b373d7dc2927e156
|
[
"MIT"
] | null | null | null |
pybpodgui_api/models/task/task_file.py
|
pybpod/pybpod-gui-api
|
a69ff79a4d9e4b6b99b618a0b373d7dc2927e156
|
[
"MIT"
] | null | null | null |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import re
from pybpodgui_api.models.task.task_io import TaskIO
logger = logging.getLogger(__name__)
class TaskFile(TaskIO):
""" Represents a state machine """
def find_task_variables_from_file(self):
task_variables = []
# match any line begining with v. and get variable name and value until end of line or # sign
pattern = "^v\.(?P<vname>\w+)\s*\=\s*(?P<vvalue>.*(?=#)|.*)"
with open(self.path, "r") as file:
file_content = file.read()
for v_name in re.findall(pattern, file_content, re.MULTILINE):
if not v_name in [var_name for var_name in task_variables]:
logger.debug("Find variable name: %s", v_name)
task_variables.append(v_name) # ignore variables value at this point
return task_variables
def find_states_from_file(self, start=1):
return self.find_pattern(start, search_match="states")
def find_events_from_file(self, start=1):
return self.find_pattern(start, search_match="events")
def find_pattern(self, start=1, search_match=None):
matches = {}
regex = r"{0}\s*=\s*\[(.+?)(?=\])".format(search_match)
try:
found_matches = re.findall(regex, self.code, flags=re.DOTALL)[0]
except:
logger.warning("Invalid {0} format or no {0} defined".format(search_match))
return matches
for idx, read_state in enumerate(found_matches.split(','), start=start):
try:
state_id = str(read_state.replace("'", "")) # if it is string
except:
state_id = str(read_state.replace("'", "")) # if it is a var
matches[idx] = state_id.strip()
logger.debug("Found %s: %s", search_match, matches)
return matches
| 31.711864 | 101 | 0.606627 |
7513da19f828f5dc97fbbb6052359fdc196ead75
| 20,859 |
py
|
Python
|
luigi/parameter.py
|
vgmartinez/luigi
|
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
|
[
"Apache-2.0"
] | null | null | null |
luigi/parameter.py
|
vgmartinez/luigi
|
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
|
[
"Apache-2.0"
] | null | null | null |
luigi/parameter.py
|
vgmartinez/luigi
|
b5ad3eba1501bdc25e91e98901bc781128f2d8a7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import datetime
import warnings
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import six
from luigi import configuration
from luigi.deprecate_kwarg import deprecate_kwarg
_no_value = object()
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class UnknownConfigException(ParameterException):
"""
Exception signifying that the ``config_path`` for the Parameter could not be found.
"""
pass
class Parameter(object):
"""
An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
class MyTask(luigi.Task):
foo = luigi.Parameter()
    This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
    ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
    There are subclasses of ``Parameter`` that define what type the parameter has. This is not
    enforced within Python, but is used for command line interaction.
The ``config_path`` argument lets you specify a place where the parameter is read from config
in case no value is provided.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate a = TaskA(x=44) then a.x == 44. If this does not exist, it will use the value
of the Parameter object, which is defined on a class level. This will be resolved in this
order of falling priority:
* Any value provided on the command line on the class level (eg. ``--TaskA-param xyz``)
* Any value provided via config (using the ``config_path`` argument)
* Any default value set using the ``default`` flag.
"""
counter = 0
"""non-atomically increasing counter used for ordering parameters."""
@deprecate_kwarg('is_boolean', 'is_bool', False)
def __init__(self, default=_no_value, is_list=False, is_boolean=False, is_global=False, significant=True, description=None,
config_path=None, positional=True):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool is_list: specify ``True`` if the parameter should allow a list of values rather
than a single value. Default: ``False``. A list has an implicit default
value of ``[]``.
:param bool is_bool: specify ``True`` if the parameter is a bool value. Default:
``False``. Bool's have an implicit default value of ``False``.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. Generally we recommend ``positional=False``
as positional arguments become very tricky when
you have inheritance and whatnot.
"""
# The default default is no default
self.__default = default
self.__global = _no_value
self.is_list = is_list
self.is_bool = is_boolean and not is_list # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception?
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.description = description
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self.__config = config_path
self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class)
Parameter.counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError):
return _no_value
if self.is_list:
return tuple(self.parse(p.strip()) for p in value.strip().split('\n'))
else:
return self.parse(value)
def _get_value(self, task_name=None, param_name=None):
if self.__global != _no_value:
return self.__global
if task_name and param_name:
v = self._get_value_from_config(task_name, param_name)
if v != _no_value:
return v
v = self._get_value_from_config(task_name, param_name.replace('_', '-'))
if v != _no_value:
warnings.warn(
'The use of the configuration [%s] %s (with dashes) should be avoided. Please use underscores.' %
(task_name, param_name), DeprecationWarning, stacklevel=2)
return v
if self.__config:
v = self._get_value_from_config(self.__config['section'], self.__config['name'])
if v != _no_value and task_name and param_name:
warnings.warn(
'The use of the configuration [%s] %s is deprecated. Please use [%s] %s' %
(self.__config['section'], self.__config['name'], task_name, param_name),
DeprecationWarning, stacklevel=2)
if v != _no_value:
return v
if self.__default != _no_value:
return self.__default
return _no_value
@property
def has_value(self):
"""
``True`` if a default was specified or if config_path references a valid entry in the conf.
Note that "value" refers to the Parameter object itself - it can be either
1. The default value for this parameter
2. A value read from the config
3. A global value
Any Task instance can have its own value set that overrides this.
"""
return self._get_value() != _no_value
@property
def value(self):
"""
The value for this Parameter.
This refers to any value defined by a default, a config option, or
a global value.
:raises MissingParameterException: if a value is not set.
:return: the parsed value.
"""
value = self._get_value()
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
def has_task_value(self, task_name, param_name):
return self._get_value(task_name, param_name) != _no_value
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
def set_global(self, value):
"""
Set the global value of this Parameter.
:param value: the new global value.
"""
self.__global = value
def reset_global(self):
self.__global = _no_value
def parse(self, x):
"""
Parse an individual value from the input.
        The default implementation is an identity (it returns ``x``), but subclasses should override
this method for specialized parsing. This method is called by :py:meth:`parse_from_input`
if ``x`` exists. If this Parameter was specified with ``is_list=True``, then ``parse`` is
called once for each item in the list.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def serialize(self, x): # opposite of parse
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
if self.is_list:
return [str(v) for v in x]
return str(x)
def parse_from_input(self, param_name, x, task_name=None):
"""
Parses the parameter value from input ``x``, handling defaults and is_list.
:param param_name: the name of the parameter. This is used for the message in
``MissingParameterException``.
:param x: the input value to parse.
:raises MissingParameterException: if x is false-y and no default is specified.
"""
if not x:
if self.has_task_value(param_name=param_name, task_name=task_name):
return self.task_value(param_name=param_name, task_name=task_name)
elif self.is_bool:
return False
elif self.is_list:
return []
else:
raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." %
(param_name, "--" + param_name.replace('_', '-')))
elif self.is_list:
return tuple(self.parse(p) for p in x)
else:
return self.parse(x)
def serialize_to_input(self, x):
if self.is_list:
return tuple(self.serialize(p) for p in x)
else:
return self.serialize(x)
def parser_dest(self, param_name, task_name, glob=False, is_without_section=False):
if is_without_section:
if glob:
return param_name
else:
return None
else:
if glob:
return task_name + '_' + param_name
else:
return param_name
def add_to_cmdline_parser(self, parser, param_name, task_name, optparse=False, glob=False, is_without_section=False):
dest = self.parser_dest(param_name, task_name, glob, is_without_section=is_without_section)
if not dest:
return
flag = '--' + dest.replace('_', '-')
description = []
description.append('%s.%s' % (task_name, param_name))
if glob:
description.append('for all instances of class %s' % task_name)
elif self.description:
description.append(self.description)
if self.has_task_value(param_name=param_name, task_name=task_name):
value = self.task_value(param_name=param_name, task_name=task_name)
description.append(" [default: %s]" % (value,))
if self.is_list:
action = "append"
elif self.is_bool:
action = "store_true"
else:
action = "store"
if optparse:
f = parser.add_option
else:
f = parser.add_argument
f(flag,
help=' '.join(description),
action=action,
dest=dest)
def parse_from_args(self, param_name, task_name, args, params):
# Note: modifies arguments
dest = self.parser_dest(param_name, task_name, glob=False)
if dest is not None:
value = getattr(args, dest, None)
params[param_name] = self.parse_from_input(param_name, value, task_name=task_name)
def set_global_from_args(self, param_name, task_name, args, is_without_section=False):
# Note: side effects
dest = self.parser_dest(param_name, task_name, glob=True, is_without_section=is_without_section)
if dest is not None:
value = getattr(args, dest, None)
if value:
self.set_global(self.parse_from_input(param_name, value, task_name=task_name))
else: # either False (bools) or None (everything else)
self.reset_global()
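# --- Hedged sketch (illustrative, not part of the original file) ---
# Demonstrates the documented fallback order on a bare Parameter instance;
# the function name and values below are made up for the example.
def _example_parameter_resolution():
    p = Parameter(default='fallback')
    assert p.value == 'fallback'   # no global value set: the default wins
    p.set_global('override')
    assert p.value == 'override'   # a global value takes priority over the default
    p.reset_global()
    return p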
class DateHourParameter(Parameter):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
    A DateHourParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T'
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``.
"""
        # TODO(erikbern): we should probably use an internal class for arbitrary
# time intervals (similar to date_interval). Or what do you think?
return datetime.datetime.strptime(s, self.date_format)
def serialize(self, dt):
"""
        Converts the datetime to a string using the format string ``%Y-%m-%dT%H``.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
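# Hedged sketch (illustrative, not part of the original file): the parse and
# serialize methods above round-trip through the '%Y-%m-%dT%H' format.
def _example_date_hour_roundtrip():
    s = '2013-07-10T19'
    p = DateHourParameter()
    assert p.serialize(p.parse(s)) == s
    return p.parse(s)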
class DateMinuteParameter(DateHourParameter):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.
    A DateMinuteParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the minute. For example, ``2013-07-10T19H07`` specifies July 10, 2013 at
19:07.
"""
date_format = '%Y-%m-%dT%HH%M' # ISO 8601 is to use 'T' and 'H'
class DateParameter(Parameter):
"""
Parameter whose value is a :py:class:`~datetime.date`.
A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
"""
def parse(self, s):
"""Parses a date string formatted as ``YYYY-MM-DD``."""
return datetime.date(*map(int, s.split('-')))
class IntParameter(Parameter):
"""
Parameter whose value is an ``int``.
"""
def parse(self, s):
"""
Parses an ``int`` from the string using ``int()``.
"""
return int(s)
class FloatParameter(Parameter):
"""
Parameter whose value is a ``float``.
"""
def parse(self, s):
"""
Parses a ``float`` from the string using ``float()``.
"""
return float(s)
class BoolParameter(Parameter):
"""
A Parameter whose value is a ``bool``.
"""
def __init__(self, *args, **kwargs):
"""
This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but
specifies ``is_bool=True``.
"""
super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs)
def parse(self, s):
"""
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
"""
return {'true': True, 'false': False}[str(s).lower()]
class BooleanParameter(BoolParameter):
def __init__(self, *args, **kwargs):
warnings.warn(
'BooleanParameter is deprecated, use BoolParameter instead',
DeprecationWarning,
stacklevel=2
)
super(BooleanParameter, self).__init__(*args, **kwargs)
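# Hedged sketch (illustrative, not part of the original file): BoolParameter
# parsing is a strict, case-insensitive 'true'/'false' lookup.
def _example_bool_parse():
    assert BoolParameter().parse('TRUE') is True
    assert BoolParameter().parse('false') is False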
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 `Time Interval
<http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation.
"""
# Class that maps to/from dates using ISO 8601 standard
# Also gives some helpful interval algebra
def parse(self, s):
"""
        Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.
see :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
from luigi import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
else:
raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
"""
Class that maps to timedelta using strings in any of the following forms:
    * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]]|s[econd[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
* ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
* ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
from datetime import timedelta
import re
re_match = re.match(regex, input)
if re_match:
kwargs = {}
has_val = False
for k, v in six.iteritems(re_match.groupdict(default="0")):
val = int(v)
has_val = has_val or val != 0
kwargs[k] = val
if has_val:
return timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return "(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
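# Hedged sketch (illustrative, not part of the original file): both input
# grammars accepted by TimeDeltaParameter.parse map onto datetime.timedelta.
def _example_timedelta_parse():
    from datetime import timedelta
    p = TimeDeltaParameter()
    assert p.parse('1 w 2 d') == timedelta(weeks=1, days=2)   # "simple" form
    assert p.parse('P1DT12H') == timedelta(days=1, hours=12)  # ISO 8601 duration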
| 37.181818 | 150 | 0.611007 |
86fe162658334557824ac072e70d8f8ea71256c6
| 8,361 |
py
|
Python
|
knlp/seq_labeling/hmm/train.py
|
Kevin1906721262/knlp
|
d8a829151a4ac71f6106046c9f2586d8fdd86908
|
[
"MIT"
] | 5 |
2022-01-23T15:41:41.000Z
|
2022-03-19T06:25:17.000Z
|
knlp/seq_labeling/hmm/train.py
|
Rooki1e/knlp
|
b19d8e0d0661b697ff4a73201c30b5a6e8969b6e
|
[
"MIT"
] | 4 |
2022-03-08T16:28:18.000Z
|
2022-03-28T15:11:11.000Z
|
knlp/seq_labeling/hmm/train.py
|
Rooki1e/knlp
|
b19d8e0d0661b697ff4a73201c30b5a6e8969b6e
|
[
"MIT"
] | 4 |
2022-02-12T13:42:07.000Z
|
2022-03-18T02:11:36.000Z
|
# !/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: train
# Author: Junyi Li
# Mail: [email protected]
# Created Time: 2021-03-18
# Description:
# -----------------------------------------------------------------------#
"""
针对hmm的训练和推理,要清晰的分为不同的步骤,分步骤进行实现
0 清晰定义好hmm需要的几个参数
1 完成基于训练数据的hmm相关参数的代码并且完成相关的编码实现
2 完成hmm的inference相关的代码
针对序列标注问题,什么是可见状态,什么是隐藏状态,转移概率,发射概率
分词,我们这样应对:
SBME
我 是 一个 大 好 人
S S BE B M E
可见状态:几个汉字
隐藏状态:几个字母,标注结果
转移概率:就是隐藏状态互相之间的概率大小
发射概率:是从隐藏状态到可见状态概率大小
初始状态:
换句话说,在inference的时候:
输入是
我 是 一个 大 好 人
输出则是对应的每个字下的标签是什么
直观感受来说,我们可以假设,不同的汉字对应于不同的标签有不同的概率大小,这个可以通过统计得到,经过大量的统计之后,我们可以知道给定一个汉字,它所对应各种标记的概率大小
则此时我们可以知道P(X|我),P(X|是),P(X|一),P(X|个),P(X|大),P(X|好),P(X|人)的大小分别是多少,例如:
P(X|我)就有四个结果:(随便举例的)
P(S|我) = 0.4
P(M|我) = 0.2
P(B|我) = 0.3
P(E|我) = 0.1
最后我们可以将他们乘起来【P(X|我),P(X|是),P(X|一),P(X|个),P(X|大),P(X|好),P(X|人)】,得到一整句话下使用不同标记的概率大小,找到概率最大的那个即可。
这就是最直觉的基于统计的分词方法。
可是,我们后知后觉的想一想,是不是前一个字的标签对后一个字的标签应该是有一定暗示意义的。比如,如果前一个字是S那么后一个字就不应该出现E和M。
这就涉及到前一个状态对后一个状态的影响,所以我们提出一个转移概率,就有马尔可夫链,而对于一阶马尔可夫,就是假设每一个标签只和前一个标签有关。
Q:一个状态集合
A:转移概率几何
init_P:初始概率。很直观的理解就是第一个字是什么标签,是会有一个统计出来的概率值的。
可是,这个时候我们就发现,我们其实是不知道一个字背后的标签的,所以我们需要把这两个信息合起来考虑。
所以HMM就允许我们同时谈论观测事件(我们看到的words)和隐藏事件(例如pos tag),
我们考虑让他们作为我们概率模型的因果变量。一个HMM模型可以被定义如下:
Q:一个状态集合:隐藏状态和观测状态
A:转移概率集合:隐藏状态之间的转移概率
O:观测序列:输入的序列
B:发射概率,表示从隐藏状态到观测状态的概率
初始状态集合:很直观的理解就是第一个字是什么标签,是会有一个统计出来的概率值的。
以上的这几种信息都是可以从给定的训练数据集中获取到的。
一个个看一下:
状态集合:把所有的汉字记录下来,隐藏状态也是我们自己定义的标签
转移概率集合:我们可以从训练数据中获取相关的信息,P(T_n|T_n-1),那么只要做个相应的统计就可以实现这个需求。
观测序列:这个是我们在inference的时候会使用到的信息
发射概率:这个要统计的就是给定标签下,各个不同的汉字的概率是多少
初始状态集合:所有的标签开头的概率大小
从以上的分析看来,一个分词要做的就是对一对数据的统计代码。
好,最后我们得到的一个模型,存储了以上的信息
然后我们完成一个inference的代码,利用这个模型进行对新输入的观测序列的inference
"""
import json
import sys
from collections import defaultdict
from knlp.common.constant import KNLP_PATH
class Train:
"""
这个类要实现对以下四个信息的获取:
状态集合:把所有的汉字记录下来,隐藏状态也是我们自己定义的标签
转移概率集合:我们可以从训练数据中获取相关的信息,P(T_n|T_n-1),那么只要做个相应的统计就可以实现这个需求。
发射概率:这个要统计的就是给定标签下,各个不同的汉字的概率是多少
初始状态集合:所有的字都会有一个P(X|我),P(X|是),P(X|一),P(X|个),P(X|大),P(X|好),P(X|人)
从以上的分析看来,一个分词要做的就是对一对数据的统计代码。
这个信息是输入信息和上面四个不太一样
观测序列:这个是我们在inference的时候会使用到的信息
"""
def __init__(self, vocab_set_path=None, training_data_path=None, test_data_path=None):
self._state_set = {}
self._transition_pro = {}
self._emission_pro = {}
self._init_state_set = {}
self.vocab_set_path = ""
self.training_data_path = ""
self.vocab_data = []
self.training_data = []
if vocab_set_path and training_data_path:
self.init_variable(vocab_set_path=vocab_set_path, training_data_path=training_data_path,
test_data_path=test_data_path)
def init_variable(self, vocab_set_path=None, training_data_path=None, test_data_path=None):
self.vocab_set_path = KNLP_PATH + "/knlp/data/seg_data/train/pku_vocab.txt" if not vocab_set_path else vocab_set_path
self.training_data_path = KNLP_PATH + "/knlp/data/seg_data/train/pku_hmm_training_data.txt" if not training_data_path else training_data_path
# self.test_data_path = KNLP_PATH + "/knlp/data/seg_data/train/pku_hmm_test_data.txt" if not test_data_path else test_data_path
with open(self.vocab_set_path, encoding='utf-8') as f:
self.vocab_data = f.readlines()
with open(self.training_data_path, encoding='utf-8') as f:
self.training_data = f.readlines()
@property
def state_set(self):
self.set_state()
return self._state_set
@property
def transition_pro(self):
self.set_transition_pro()
return self._transition_pro
@property
def emission_pro(self):
self.set_emission_pro()
return self._emission_pro
@property
def init_state_set(self):
self.set_init_state_set()
return self._init_state_set
def set_state(self):
self._state_set["hidden_state"] = ["S", "B", "E", "M"]
self._state_set["observation_state"] = []
for line in self.vocab_data:
self._state_set["observation_state"].append(line.strip())
def set_transition_pro(self):
"""
统计获取转移概率
S: B E M
Returns:
"""
count_dict = {
"S": defaultdict(int),
"B": defaultdict(int),
"M": defaultdict(int),
"E": defaultdict(int),
}
for idx in range(len(self.training_data) - 1):
line = self.training_data[idx].strip()
if not line:
continue
            line = line.strip().split("\t")  # get the label currently being counted
next_line = self.training_data[idx + 1].strip()
if not next_line:
continue
            next_line = self.training_data[idx + 1].strip().split("\t")  # get the next label
count_dict[line[-1]][next_line[-1]] += 1
for start_label, end_labels in count_dict.items():
self._transition_pro[start_label] = {}
cnt_sum = sum(list(end_labels.values()))
for end_label, count in end_labels.items():
self._transition_pro[start_label][end_label] = count / cnt_sum
def set_emission_pro(self):
"""
        Count the training data to obtain the emission probabilities.
Returns:
"""
count_dict = {
"S": defaultdict(int),
"B": defaultdict(int),
"M": defaultdict(int),
"E": defaultdict(int),
}
for line in self.training_data:
if not line.strip():
continue
line = line.strip().split("\t")
count_dict[line[-1]][line[0]] += 1
for hidden_state, observation_states in count_dict.items():
self._emission_pro[hidden_state] = {}
cnt_sum = sum(list(observation_states.values()))
for observation_state, count in observation_states.items():
self._emission_pro[hidden_state][observation_state] = count / cnt_sum
def set_init_state_set(self):
"""
        When a character appears at the start, how likely is each label.
{WORD: {LABEL: PRO}}
Returns:
"""
count_dict = {
"S": 0,
"B": 0,
"M": 0,
"E": 0,
}
for line in self.training_data:
if not line.strip():
continue
line = line.strip().split("\t")
count_dict[line[-1]] += 1
cnt_sum = sum(list(count_dict.values()))
for start_label, cnt in count_dict.items():
self._init_state_set[start_label] = cnt / cnt_sum
@staticmethod
def save_model(file_path, data, format="json"):
if format == "json":
with open(file_path, "w", encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=2)
def build_model(self, state_set_save_path=None, transition_pro_save_path=None, emission_pro_save_path=None,
init_state_set_save_path=None):
"""
        Run the functions above in order, then save the obtained results.
Returns:
"""
state_set = KNLP_PATH + "/knlp/model/hmm/seg/state_set.json" if not state_set_save_path else state_set_save_path + "/state_set.json"
transition_pro = KNLP_PATH + "/knlp/model/hmm/seg/transition_pro.json" if not transition_pro_save_path else transition_pro_save_path + "/transition_pro.json"
emission_pro = KNLP_PATH + "/knlp/model/hmm/seg/emission_pro.json" if not emission_pro_save_path else emission_pro_save_path + "/emission_pro.json"
init_state_set = KNLP_PATH + "/knlp/model/hmm/seg/init_state_set.json" if not init_state_set_save_path else init_state_set_save_path + "/init_state_set.json"
self.save_model(file_path=state_set, data=self.state_set)
self.save_model(file_path=transition_pro, data=self.transition_pro)
self.save_model(file_path=emission_pro, data=self.emission_pro)
self.save_model(file_path=init_state_set, data=self.init_state_set)
if __name__ == '__main__':
    # input paths for vocab and training data
args = sys.argv
vocab_set_path = None
training_data_path = None
if len(args) > 1:
vocab_set_path = args[1]
training_data_path = args[2]
    a = Train(vocab_set_path=vocab_set_path, training_data_path=training_data_path)
    if not a.training_data:  # avoid a second init_variable() call resetting CLI-supplied paths to defaults
        a.init_variable()
a.build_model()
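# --- Hedged sketch (illustrative, not part of the original script) ---
# Traces the count-then-normalise step shared by set_transition_pro and
# set_emission_pro on a hand-made count dict; the numbers are made up.
def _example_normalise_counts():
    counts = {"S": {"S": 3, "B": 1}}
    probs = {}
    for start_label, end_labels in counts.items():
        total = sum(end_labels.values())
        probs[start_label] = {k: v / total for k, v in end_labels.items()}
    assert probs == {"S": {"S": 0.75, "B": 0.25}}
    return probs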
| 33.310757 | 165 | 0.646334 |
f24292e13987fb1726538b85e57e1f0dd2d08427
| 7,390 |
py
|
Python
|
cryptoapis/model/list_transactions_by_block_hash_response_item_recipients.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/list_transactions_by_block_hash_response_item_recipients.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/list_transactions_by_block_hash_response_item_recipients.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1 |
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class ListTransactionsByBlockHashResponseItemRecipients(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'address': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'address': 'address', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, address, amount, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHashResponseItemRecipients - a model defined in OpenAPI
Args:
            address (str): The address which receives this transaction. In UTXO-based protocols like Bitcoin there could be several recipients while in account-based protocols like Ethereum there is always only one recipient.
            amount (str): Represents the amount received by this address.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                                    traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.address = address
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
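# --- Hedged usage sketch (illustrative, not generated code) ---
# Instantiating the model only requires the two mandatory fields; the values
# below are made up for demonstration.
if __name__ == '__main__':
    recipient = ListTransactionsByBlockHashResponseItemRecipients(
        address='bc1qexampleaddress0000',  # hypothetical address
        amount='0.0015',                   # amounts are serialized as strings
    )
    print(recipient.address, recipient.amount)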
| 42.228571 | 484 | 0.606225 |
d550d39918ef3fd7e6c148e7fb58533cbdde8253
| 72 |
py
|
Python
|
custom_components/panasonic_smart_app/smartApp/utils.py
|
sugoi-wada/panasonic_smart_app
|
78c3e377165b93c415108fa21137067585cfc72d
|
[
"MIT"
] | 27 |
2021-03-04T16:54:27.000Z
|
2022-03-13T05:22:25.000Z
|
custom_components/panasonic_smart_app/smartApp/utils.py
|
sugoi-wada/panasonic_smart_app
|
78c3e377165b93c415108fa21137067585cfc72d
|
[
"MIT"
] | 44 |
2021-03-09T03:29:37.000Z
|
2022-03-29T06:40:22.000Z
|
custom_components/panasonic_smart_app/smartApp/utils.py
|
sugoi-wada/panasonic_smart_app
|
78c3e377165b93c415108fa21137067585cfc72d
|
[
"MIT"
] | 13 |
2021-03-04T15:03:34.000Z
|
2022-03-06T11:03:00.000Z
|
def chunks(L, n):
return [L[x : x + n] for x in range(0, len(L), n)]
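# Hedged usage sketch (illustrative, not part of the original file):
# chunks() splits a sequence into fixed-size pieces; the last piece may be shorter.
def _example_chunks():
    assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]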
| 36 | 54 | 0.527778 |
615618035647358b959ae66fe4f07206164d3b55
| 3,523 |
py
|
Python
|
lab4/training/run_experiment.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | 1 |
2021-02-12T06:10:10.000Z
|
2021-02-12T06:10:10.000Z
|
lab4/training/run_experiment.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab4/training/run_experiment.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
"""Experiment-running framework."""
import argparse
import importlib
import numpy as np
import torch
import pytorch_lightning as pl
from text_recognizer import lit_models
# In order to ensure reproducible experiments, we must set random seeds.
np.random.seed(42)
torch.manual_seed(42)
def _import_class(module_and_class_name: str) -> type:
"""Import class from a module, e.g. 'text_recognizer.models.MLP'"""
module_name, class_name = module_and_class_name.rsplit(".", 1)
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
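# Hedged sketch (illustrative, not part of the original file): _import_class
# resolves any dotted path via importlib, stdlib modules included.
def _example_import_class():
    import collections
    assert _import_class("collections.OrderedDict") is collections.OrderedDict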
def _setup_parser():
"""Set up Python's ArgumentParser with data, model, trainer, and other arguments."""
parser = argparse.ArgumentParser(add_help=False)
# Add Trainer specific arguments, such as --max_epochs, --gpus, --precision
trainer_parser = pl.Trainer.add_argparse_args(parser)
trainer_parser._action_groups[1].title = "Trainer Args" # pylint: disable=protected-access
parser = argparse.ArgumentParser(add_help=False, parents=[trainer_parser])
# Basic arguments
parser.add_argument("--data_class", type=str, default="MNIST")
parser.add_argument("--model_class", type=str, default="MLP")
# Get the data and model classes, so that we can add their specific arguments
temp_args, _ = parser.parse_known_args()
data_class = _import_class(f"text_recognizer.data.{temp_args.data_class}")
model_class = _import_class(f"text_recognizer.models.{temp_args.model_class}")
# Get data, model, and LitModel specific arguments
data_group = parser.add_argument_group("Data Args")
data_class.add_to_argparse(data_group)
    model_group = parser.add_argument_group("Model Args")
model_class.add_to_argparse(model_group)
lit_model_group = parser.add_argument_group("LitModel Args")
lit_models.BaseLitModel.add_to_argparse(lit_model_group)
parser.add_argument("--help", "-h", action="help")
return parser
def main():
"""
Run an experiment.
Sample command:
```
python training/run_experiment.py --max_epochs=3 --gpus='0,' --num_workers=20 --model_class=MLP --data_class=MNIST
```
"""
parser = _setup_parser()
args = parser.parse_args()
data_class = _import_class(f"text_recognizer.data.{args.data_class}")
model_class = _import_class(f"text_recognizer.models.{args.model_class}")
data = data_class(args)
model = model_class(data_config=data.config(), args=args)
if args.loss not in ('ctc', 'transformer'):
lit_model = lit_models.BaseLitModel(model, args=args)
# Hide lines below until Lab 3
if args.loss == "ctc":
lit_model = lit_models.CTCLitModel(args=args, model=model)
# Hide lines above until Lab 3
# Hide lines below until Lab 4
if args.loss == "transformer":
lit_model = lit_models.TransformerLitModel(args=args, model=model)
# Hide lines above until Lab 4
loggers = [pl.loggers.TensorBoardLogger("training/logs")]
callbacks = [pl.callbacks.EarlyStopping(monitor="val_loss", mode="min", patience=10)]
args.weights_summary = "full" # Print full summary of the model
trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks, logger=loggers, default_root_dir="training/logs")
trainer.tune(lit_model, datamodule=data) # If passing --auto_lr_find, this will set learning rate
trainer.fit(lit_model, datamodule=data)
trainer.test(lit_model, datamodule=data)
if __name__ == "__main__":
main()
| 35.585859 | 120 | 0.724666 |
3dcc1c7599683b34b1d3ec9cba9617db5327506f
| 753 |
py
|
Python
|
2-1/process/04process_pool.py
|
qumogu/pystudy
|
66bd8848ed625265961808a525e6c33c3cc8d8be
|
[
"Apache-2.0"
] | null | null | null |
2-1/process/04process_pool.py
|
qumogu/pystudy
|
66bd8848ed625265961808a525e6c33c3cc8d8be
|
[
"Apache-2.0"
] | null | null | null |
2-1/process/04process_pool.py
|
qumogu/pystudy
|
66bd8848ed625265961808a525e6c33c3cc8d8be
|
[
"Apache-2.0"
] | null | null | null |
#from multiprocessing import Pool
import multiprocessing
import time
import os
def po_fun(q, num):
    pid = os.getpid()
    num += 1
    for i in range(1, 4):
        print("Task %d: process %d is running, iteration %d." % (num, pid, i))
        time.sleep(1)
    q.put((num, pid))
    print("---- Task %d finished ----" % num)
# 2. Create the message queue
# q = multiprocessing.Queue(50)
# q = "test"
def main():
    # 1. Create the process pool (3 worker processes)
    po = multiprocessing.Pool(3)
    # 2. Create the message queue; a Manager().Queue is required when sharing with a Pool
    q = multiprocessing.Manager().Queue(50)
    # 3. The task function po_fun is defined above
    print("Creating the process tasks")
    # 4. Dispatch the tasks to the pool
    for i in range(5):
        print(i)
        po.apply_async(po_fun, args=(q, i))
    # 5. Main process monitors the results
    time.sleep(1)
    print(str(q.get()))
    po.close()
    po.join()
    print("Main process finished")
if __name__ == "__main__":
main()
| 17.928571 | 53 | 0.573705 |
93dc21727e90dcd310c1d8e23d8868e55b3e9714
| 845 |
py
|
Python
|
queue_data_structure.py
|
zhouyuexie/algorithms
|
aa2279b8cd0fc6ddecf8c9d477149d6191eaebfb
|
[
"MIT"
] | null | null | null |
queue_data_structure.py
|
zhouyuexie/algorithms
|
aa2279b8cd0fc6ddecf8c9d477149d6191eaebfb
|
[
"MIT"
] | null | null | null |
queue_data_structure.py
|
zhouyuexie/algorithms
|
aa2279b8cd0fc6ddecf8c9d477149d6191eaebfb
|
[
"MIT"
] | null | null | null |
#-*-coding:utf-8-*-
"""队列:队列是一种先进先出的数据结构,比如排队,先来的先被服务。"""
class Queue():
def __init__(self):# 初始化队列数据
self._queue=[]# 使用数组存储数据
def push(self,*value):# 增加数据
for n in value:
self._queue.append(n)
def out(self):# 推出元素
if(len(self._queue)==0):
print("No data can use.")
return False
else:
t = self._queue[0]
del self._queue[0]
return t
def view(self):# 返回所有队列元素
return self._queue
def number(self):# 返回队列长度
return len(self._queue)
# Use the queue to solve a decryption puzzle. The rules:
# remove the first element, then move the next element to the back,
# then remove the next one... until nothing is left,
# and output the removed elements as the decrypted sequence.
# A queue solves this directly:
def decrypt(queue):
    a = []
    while queue.number() > 0:
        a.append(queue.out())
        if queue.number() > 0:  # the original for-range loop could push a spurious False once empty
            queue.push(queue.out())  # move the next element to the back
    return a
queue = Queue()
queue.push(6,3,1,7,5,8,9,2,4)
print(decrypt(queue))
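# Hedged sanity check (illustrative, not part of the original; the expected
# sequence below was derived by tracing the rules above by hand):
check_queue = Queue()
check_queue.push(6, 3, 1, 7, 5, 8, 9, 2, 4)
assert decrypt(check_queue) == [6, 1, 5, 9, 4, 7, 2, 8, 3]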
| 19.651163 | 39 | 0.629586 |
34165d388ff13e17105be61cb7edd09cf6b36e78
| 4,099 |
py
|
Python
|
circus/tests/test_client.py
|
thedrow/circus
|
4e284eb60eaa365b05546e94c092d6063c00f446
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/test_client.py
|
thedrow/circus
|
4e284eb60eaa365b05546e94c092d6063c00f446
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/test_client.py
|
thedrow/circus
|
4e284eb60eaa365b05546e94c092d6063c00f446
|
[
"Apache-2.0"
] | null | null | null |
import time
from tornado.testing import gen_test
from tornado.gen import coroutine, Return
from circus.tests.support import TestCircus, EasyTestSuite
from circus.client import make_message, CallError
from circus.stream import QueueStream
class TestClient(TestCircus):
@coroutine
def status(self, cmd, **props):
resp = yield self.call(cmd, **props)
raise Return(resp.get('status'))
@coroutine
def numprocesses(self, cmd, **props):
resp = yield self.call(cmd, waiting=True, **props)
raise Return(resp.get('numprocesses'))
@coroutine
def numwatchers(self, cmd, **props):
resp = yield self.call(cmd, **props)
raise Return(resp.get('numwatchers'))
@coroutine
def set(self, name, **opts):
resp = yield self.status("set", name=name, waiting=True, options=opts)
raise Return(resp)
@gen_test
def test_client(self):
# playing around with the watcher
yield self.start_arbiter()
msg = make_message("numwatchers")
resp = yield self.cli.call(msg)
self.assertEqual(resp.get("numwatchers"), 1)
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
self.assertEqual((yield self.set("test", numprocesses=2)), 'ok')
self.assertEqual((yield self.numprocesses("numprocesses")), 2)
self.assertEqual((yield self.set("test", numprocesses=1)), 'ok')
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
self.assertEqual((yield self.numwatchers("numwatchers")), 1)
self.assertEqual((yield self.call("list")).get('watchers'), ['test'])
self.assertEqual((yield self.numprocesses("incr", name="test")), 2)
self.assertEqual((yield self.numprocesses("numprocesses")), 2)
self.assertEqual((yield self.numprocesses("incr", name="test", nb=2)),
4)
self.assertEqual((yield self.numprocesses("decr", name="test", nb=3)),
1)
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
self.assertEqual((yield self.set("test", env={"test": 2})),
'error')
self.assertEqual((yield self.set("test", env={"test": '2'})),
'ok')
resp = yield self.call('get', name='test', keys=['env'])
options = resp.get('options', {})
self.assertEqual(options.get('env'), {'test': '2'})
resp = yield self.call('stats', name='test')
self.assertEqual(resp['status'], 'ok')
resp = yield self.call('globaloptions', name='test')
self.assertEqual(resp['options']['pubsub_endpoint'],
self.arbiter.pubsub_endpoint)
yield self.stop_arbiter()
def long_hook(*args, **kw):
time.sleep(5)
class TestWithHook(TestCircus):
def run_with_hooks(self, hooks):
self.stream = QueueStream()
self.errstream = QueueStream()
dummy_process = 'circus.tests.support.run_process'
return self._create_circus(dummy_process,
stdout_stream={'stream': self.stream},
stderr_stream={'stream': self.errstream},
hooks=hooks)
    @gen_test
    def test_message_id(self):
hooks = {'before_stop': ('circus.tests.test_client.long_hook', False)}
testfile, arbiter = self.run_with_hooks(hooks)
try:
msg = make_message("numwatchers")
resp = yield self.cli.call(msg)
self.assertEqual(resp.get("numwatchers"), 1)
# this should timeout
self.assertRaises(CallError, self.cli.call, make_message("stop"))
# and we should get back on our feet
del arbiter.watchers[0].hooks['before_stop']
while arbiter.watchers[0].status() != 'stopped':
time.sleep(.1)
            resp = yield self.cli.call(make_message("numwatchers"))
self.assertEqual(resp.get("numwatchers"), 1)
finally:
arbiter.stop()
test_suite = EasyTestSuite(__name__)
| 36.598214 | 78 | 0.600634 |
dbc7397a3d0981dd2f1786945e3cf0f4a695a0f3
| 478 |
py
|
Python
|
read_sophie.py
|
Dartspacephysiker/NOAA_Asymm
|
afe62e1764dddd7204d144a689adb797bef40c8b
|
[
"MIT"
] | null | null | null |
read_sophie.py
|
Dartspacephysiker/NOAA_Asymm
|
afe62e1764dddd7204d144a689adb797bef40c8b
|
[
"MIT"
] | null | null | null |
read_sophie.py
|
Dartspacephysiker/NOAA_Asymm
|
afe62e1764dddd7204d144a689adb797bef40c8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 13:39:28 2020
@author: jone
"""
#Read sophie
import pandas as pd
s=pd.read_csv('./data_jone/sophie75.txt',delimiter=' ', header=10)
s.loc[:,'datetime'] = pd.to_datetime(s.DATE, format='%Y/%m/%d-%H:%M:%S')
s.index = s.datetime
s.loc[:,'ssphase'] = s['UTC']
s.loc[:,'SMUflag'] = s['-']
s = s.drop(['DATE','UTC','-','PHASE','-.1','FLAG','datetime'], axis=1)
s.to_hdf('./data_jone/sophie75.h5',key='data')
| 25.157895 | 72 | 0.606695 |
26eee9ce1962fe8b817b07c58aed68f7199ca361
| 60,828 |
py
|
Python
|
vw_connection.py
|
Madex7/volkswagencarnet
|
b300dd7de4841d1f5c8c376f96e22345933826f0
|
[
"Apache-2.0"
] | null | null | null |
vw_connection.py
|
Madex7/volkswagencarnet
|
b300dd7de4841d1f5c8c376f96e22345933826f0
|
[
"Apache-2.0"
] | null | null | null |
vw_connection.py
|
Madex7/volkswagencarnet
|
b300dd7de4841d1f5c8c376f96e22345933826f0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Communicate with We Connect services."""
import base64
import os
"""Modified to utilize API calls derived from Android Apps instead of Web API"""
import re
import time
import logging
import asyncio
import hashlib
import jwt
from sys import version_info, argv
from datetime import timedelta, datetime
from urllib.parse import urljoin, parse_qs, urlparse
from json import dumps as to_json
import aiohttp
from bs4 import BeautifulSoup
from base64 import b64encode
from vw_utilities import read_config, json_loads
from vw_vehicle import Vehicle
from aiohttp import ClientSession, ClientTimeout
from aiohttp.hdrs import METH_GET, METH_POST
from vw_const import (
BRAND,
COUNTRY,
HEADERS_SESSION,
HEADERS_AUTH,
BASE_SESSION,
BASE_AUTH,
CLIENT,
XCLIENT_ID,
XAPPVERSION,
XAPPNAME,
USER_AGENT,
APP_URI,
)
version_info >= (3, 0) or exit('Python 3 required')
_LOGGER = logging.getLogger(__name__)
TIMEOUT = timedelta(seconds=30)
JWT_ALGORITHMS = ['RS256']
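# --- Hedged sketch (illustrative, not part of the original module) ---
# The login flow below builds a PKCE pair: a random code_verifier plus its
# S256 code_challenge (base64url-encoded SHA-256 digest, '=' padding stripped).
def _example_pkce_pair():
    verifier = base64.urlsafe_b64encode(os.urandom(32)).rstrip(b'=')
    challenge = base64.urlsafe_b64encode(hashlib.sha256(verifier).digest()).rstrip(b'=')
    return verifier.decode(), challenge.decode()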
class Connection:
""" Connection to VW-Group Connect services """
# Init connection class
def __init__(self, session, username, password, fulldebug=False, country=COUNTRY, interval=timedelta(minutes=5)):
""" Initialize """
self._x_client_id = None
self._session = session
self._session_fulldebug = fulldebug
self._session_headers = HEADERS_SESSION.copy()
self._session_base = BASE_SESSION
self._session_auth_headers = HEADERS_AUTH.copy()
self._session_auth_base = BASE_AUTH
self._session_refresh_interval = interval
self._session_auth_ref_url = BASE_SESSION
self._session_spin_ref_url = BASE_SESSION
self._session_logged_in = False
self._session_first_update = False
self._session_auth_username = username
self._session_auth_password = password
self._session_tokens = {}
self._session_country = country.upper()
self._vin = ""
self._vehicles = []
_LOGGER.debug(f'Using service {self._session_base}')
self._jarCookie = ""
self._state = {}
def _clear_cookies(self):
self._session._cookie_jar._cookies.clear()
# API Login
async def doLogin(self):
"""Login method, clean login"""
_LOGGER.debug('Initiating new login')
# Remove cookies and re-init headers as we are doing a new login
self._clear_cookies()
self._session_headers = HEADERS_SESSION.copy()
self._session_auth_headers = HEADERS_AUTH.copy()
if not await self._login('Legacy'):
_LOGGER.info('Something failed')
self._session_logged_in = False
return False
else:
_LOGGER.info('Successfully logged in')
self._session_tokens['identity'] = self._session_tokens['Legacy'].copy()
self._session_logged_in = True
# Get VW-Group API tokens
if not await self._getAPITokens():
self._session_logged_in = False
return False
# Get list of vehicles from account
_LOGGER.debug('Fetching vehicles associated with account')
await self.set_token('vwg')
self._session_headers.pop('Content-Type', None)
loaded_vehicles = await self.get(
url=f'https://msg.volkswagen.de/fs-car/usermanagement/users/v1/{BRAND}/{self._session_country}/vehicles'
)
# Add Vehicle class object for all VIN-numbers from account
if loaded_vehicles.get('userVehicles') is not None:
_LOGGER.debug('Found vehicle(s) associated with account.')
for vehicle in loaded_vehicles.get('userVehicles').get('vehicle'):
self._vehicles.append(Vehicle(self, vehicle))
else:
_LOGGER.warning('Failed to login to We Connect API.')
self._session_logged_in = False
return False
# Update all vehicles data before returning
await self.set_token('vwg')
await self.update()
return True
async def _login(self, client='Legacy'):
"""Login function."""
# Helper functions
def getNonce():
ts = "%d" % (time.time())
sha256 = hashlib.sha256()
sha256.update(ts.encode())
return b64encode(sha256.digest()).decode('utf-8')[:-1]
def base64URLEncode(s):
return base64.urlsafe_b64encode(s).rstrip(b'=')
def extract_csrf(req):
return re.compile('<meta name="_csrf" content="([^"]*)"/>').search(req).group(1)
def extract_guest_language_id(req):
return req.split('_')[1].lower()
# Login starts here
try:
# Get OpenID config:
self._clear_cookies()
self._session_headers = HEADERS_SESSION.copy()
self._session_auth_headers = HEADERS_AUTH.copy()
if self._session_fulldebug:
_LOGGER.debug(f'Requesting openid config')
req = await self._session.get(
url='https://identity.vwgroup.io/.well-known/openid-configuration'
)
if req.status != 200:
return False
response_data = await req.json()
authorization_endpoint = response_data['authorization_endpoint']
auth_issuer = response_data['issuer']
# Get authorization page (login page)
# https://identity.vwgroup.io/oidc/v1/authorize?nonce={NONCE}&state={STATE}&response_type={TOKEN_TYPES}&scope={SCOPE}&redirect_uri={APP_URI}&client_id={CLIENT_ID}
if self._session_fulldebug:
_LOGGER.debug(f'Get authorization page from "{authorization_endpoint}"')
self._session_auth_headers.pop('Referer', None)
self._session_auth_headers.pop('Origin', None)
_LOGGER.debug(f'Request headers: "{self._session_auth_headers}"')
try:
code_verifier = base64URLEncode(os.urandom(32))
if len(code_verifier) < 43:
raise ValueError("Verifier too short. n_bytes must be > 30.")
elif len(code_verifier) > 128:
raise ValueError("Verifier too long. n_bytes must be < 97.")
challenge = base64URLEncode(hashlib.sha256(code_verifier).digest())
req = await self._session.get(
url=authorization_endpoint + \
'?redirect_uri=' + APP_URI + \
'&prompt=login' + \
'&nonce=' + getNonce() + \
'&state=' + getNonce() + \
'&code_challenge_method=s256' + \
'&code_challenge=' + challenge.decode() + \
'&response_type=' + CLIENT[client].get('TOKEN_TYPES') + \
'&client_id=' + CLIENT[client].get('CLIENT_ID') + \
'&scope=' + CLIENT[client].get('SCOPE'),
headers=self._session_auth_headers,
allow_redirects=False
)
if req.headers.get('Location', False):
ref = urljoin(authorization_endpoint, req.headers.get('Location', ''))
if 'error' in ref:
error = parse_qs(urlparse(ref).query).get('error', '')[0]
if 'error_description' in ref:
error_description = parse_qs(urlparse(ref).query).get('error_description', '')[0]
_LOGGER.info(f'Unable to login, {error_description}')
else:
_LOGGER.info(f'Unable to login.')
raise Exception(error)
else:
if self._session_fulldebug:
_LOGGER.debug(f'Got redirect to "{ref}"')
req = await self._session.get(
url=ref,
headers=self._session_auth_headers,
allow_redirects=False
)
else:
_LOGGER.warning(f'Unable to fetch authorization endpoint.')
raise Exception('Missing "location" header')
except Exception as error:
_LOGGER.warning('Failed to get authorization endpoint')
raise error
if req.status != 200:
raise Exception('Fetching authorization endpoint failed')
else:
_LOGGER.debug('Got authorization endpoint')
try:
response_data = await req.text()
response_soup = BeautifulSoup(response_data, 'html.parser')
mailform = dict([(t['name'], t['value']) for t in
response_soup.find('form', id='emailPasswordForm').find_all('input', type='hidden')])
mailform['email'] = self._session_auth_username
pe_url = auth_issuer + response_soup.find('form', id='emailPasswordForm').get('action')
except Exception as e:
_LOGGER.error('Failed to extract user login form.')
raise e
# POST email
# https://identity.vwgroup.io/signin-service/v1/{CLIENT_ID}/login/identifier
self._session_auth_headers['Referer'] = authorization_endpoint
self._session_auth_headers['Origin'] = auth_issuer
req = await self._session.post(
url=pe_url,
headers=self._session_auth_headers,
data=mailform
)
if req.status != 200:
raise Exception('POST password request failed')
try:
response_data = await req.text()
response_soup = BeautifulSoup(response_data, 'html.parser')
pw_form: dict[str, str] = {}
post_action = None
client_id = None
for d in response_soup.find_all('script'):
if 'src' in d.attrs:
continue
if 'window._IDK' in d.string:
if re.match('"errorCode":"', d.string) is not None:
raise Exception('Error code in response')
pw_form['relayState'] = re.search('"relayState":"([a-f0-9]*)"', d.string)[1]
pw_form['hmac'] = re.search('"hmac":"([a-f0-9]*)"', d.string)[1]
pw_form['email'] = re.search('"email":"([^"]*)"', d.string)[1]
pw_form['_csrf'] = re.search('csrf_token:\\s*\'([^"\']*)\'', d.string)[1]
post_action = re.search('"postAction":\\s*"([^"\']*)"', d.string)[1]
client_id = re.search('"clientId":\\s*"([^"\']*)"', d.string)[1]
break
if pw_form['hmac'] is None or post_action is None:
raise Exception('Failed to find authentication data in response')
pw_form['password'] = self._session_auth_password
pw_url = "{host}/signin-service/v1/{clientId}/{postAction}".format(
host=auth_issuer,
clientId=client_id,
postAction=post_action
)
except Exception as e:
_LOGGER.error('Failed to extract password login form.')
raise e
# POST password
# https://identity.vwgroup.io/signin-service/v1/{CLIENT_ID}/login/authenticate
self._session_auth_headers['Referer'] = pe_url
self._session_auth_headers['Origin'] = auth_issuer
_LOGGER.debug('Authenticating with email and password.')
if self._session_fulldebug:
_LOGGER.debug(f'Using login action url: "{pw_url}"')
req = await self._session.post(
url=pw_url,
headers=self._session_auth_headers,
data=pw_form,
allow_redirects=False
)
_LOGGER.debug('Parsing login response.')
# Follow all redirects until we get redirected back to "our app"
try:
max_depth = 10
ref = urljoin(pw_url, req.headers['Location'])
while not ref.startswith(APP_URI):
if self._session_fulldebug:
_LOGGER.debug(f'Following redirect to "{ref}"')
response = await self._session.get(
url=ref,
headers=self._session_auth_headers,
allow_redirects=False
)
if not response.headers.get('Location', False):
_LOGGER.info(f'Login failed, does this account have any vehicle with connect services enabled?')
raise Exception('User appears unauthorized')
ref = urljoin(ref, response.headers['Location'])
# Set a max limit on requests to prevent forever loop
max_depth -= 1
if max_depth == 0:
_LOGGER.warning('Should have gotten a token by now.')
raise Exception('Too many redirects')
except Exception as e:
# If we get excepted it should be because we can't redirect to the APP_URI URL
if 'error' in ref:
error = parse_qs(urlparse(ref).query).get('error', '')[0]
if error == 'login.error.throttled':
timeout = parse_qs(urlparse(ref).query).get('enableNextButtonAfterSeconds', '')[0]
_LOGGER.warning(f'Login failed, login is disabled for another {timeout} seconds')
elif error == 'login.errors.password_invalid':
_LOGGER.warning(f'Login failed, invalid password')
else:
_LOGGER.warning(f'Login failed: {error}')
                    raise Exception(error)
if 'code' in ref:
_LOGGER.debug('Got code: %s' % ref)
pass
else:
_LOGGER.debug(f'Exception occurred while logging in.')
raise e
_LOGGER.debug('Login successful, received authorization code.')
# Extract code and tokens
jwt_auth_code = parse_qs(urlparse(ref).fragment).get('code')[0]
jwt_id_token = parse_qs(urlparse(ref).fragment).get('id_token')[0]
# Exchange Auth code and id_token for new tokens with refresh_token (so we can easier fetch new ones later)
token_body = {
'auth_code': jwt_auth_code,
'id_token': jwt_id_token,
'code_verifier': code_verifier.decode(),
'brand': BRAND
}
_LOGGER.debug('Trying to fetch user identity tokens.')
token_url = 'https://tokenrefreshservice.apps.emea.vwapps.io/exchangeAuthCode'
req = await self._session.post(
url=token_url,
headers=self._session_auth_headers,
data=token_body,
allow_redirects=False
)
if req.status != 200:
raise Exception('Token exchange failed')
# Save tokens as "identity", theese are tokens representing the user
self._session_tokens[client] = await req.json()
if 'error' in self._session_tokens[client]:
error = self._session_tokens[client].get('error', '')
if 'error_description' in self._session_tokens[client]:
error_description = self._session_tokens[client].get('error_description', '')
raise Exception(f'{error} - {error_description}')
else:
raise Exception(error)
if self._session_fulldebug:
for token in self._session_tokens.get(client, {}):
_LOGGER.debug(f'Got token {token}')
if not await self.verify_tokens(self._session_tokens[client].get('id_token', ''), 'identity'):
_LOGGER.warning('User identity token could not be verified!')
else:
_LOGGER.debug('User identity token verified OK.')
except Exception as error:
_LOGGER.error(f'Login failed for {BRAND} account, {error}')
_LOGGER.exception(error)
self._session_logged_in = False
return False
return True
async def _getAPITokens(self):
try:
# Get VW Group API tokens
# https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/token
tokenBody2 = {
'grant_type': 'id_token',
'token': self._session_tokens['identity']['id_token'],
'scope': 'sc2:fal'
}
_LOGGER.debug('Trying to fetch api tokens.')
req = await self._session.post(
url='https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/token',
headers={
'User-Agent': USER_AGENT,
'X-App-Version': XAPPVERSION,
'X-App-Name': XAPPNAME,
'X-Client-Id': XCLIENT_ID,
},
data=tokenBody2,
allow_redirects=False
)
if req.status > 400:
_LOGGER.debug('API token request failed.')
raise Exception(f'API token request returned with status code {req.status}')
else:
# Save tokens as "vwg", use theese for get/posts to VW Group API
self._session_tokens['vwg'] = await req.json()
if 'error' in self._session_tokens['vwg']:
error = self._session_tokens['vwg'].get('error', '')
if 'error_description' in self._session_tokens['vwg']:
error_description = self._session_tokens['vwg'].get('error_description', '')
raise Exception(f'{error} - {error_description}')
else:
raise Exception(error)
if self._session_fulldebug:
for token in self._session_tokens.get('vwg', {}):
_LOGGER.debug(f'Got token {token}')
if not await self.verify_tokens(self._session_tokens['vwg'].get('access_token', ''), 'vwg'):
_LOGGER.warning('VW-Group API token could not be verified!')
else:
_LOGGER.debug('VW-Group API token verified OK.')
# Update headers for requests, defaults to using VWG token
self._session_headers['Authorization'] = 'Bearer ' + self._session_tokens['vwg']['access_token']
except Exception as error:
_LOGGER.error(f'Failed to fetch VW-Group API tokens, {error}')
self._session_logged_in = False
return False
return True
async def terminate(self):
"""Log out from connect services"""
_LOGGER.info(f'Initiating logout')
await self.logout()
async def logout(self):
"""Logout, revoke tokens."""
self._session_headers.pop('Authorization', None)
if self._session_logged_in:
if self._session_headers.get('vwg', {}).get('access_token'):
_LOGGER.info('Revoking API Access Token...')
self._session_headers['token_type_hint'] = 'access_token'
params = {"token": self._session_tokens['vwg']['access_token']}
revoke_at = await self.post(
'https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/revoke', data=params)
if self._session_headers.get('vwg', {}).get('refresh_token'):
_LOGGER.info('Revoking API Refresh Token...')
self._session_headers['token_type_hint'] = 'refresh_token'
params = {"token": self._session_tokens['vwg']['refresh_token']}
revoke_rt = await self.post(
'https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/revoke', data=params)
self._session_headers.pop('token_type_hint', None)
if self._session_headers.get('identity', {}).get('identity_token'):
_LOGGER.info('Revoking Identity Access Token...')
# params = {
# "token": self._session_tokens['identity']['access_token'],
# "brand": BRAND
# }
# revoke_at = await self.post('https://tokenrefreshservice.apps.emea.vwapps.io/revokeToken', data = params)
if self._session_headers.get('identity', {}).get('refresh_token'):
_LOGGER.info('Revoking Identity Refresh Token...')
params = {
"token": self._session_tokens['identity']['refresh_token'],
"brand": BRAND
}
revoke_rt = await self.post('https://tokenrefreshservice.apps.emea.vwapps.io/revokeToken', data=params)
# HTTP methods to API
async def _request(self, method, url, **kwargs):
"""Perform a query to the VW-Group API"""
_LOGGER.debug(f'HTTP {method} "{url}"')
async with self._session.request(
method,
url,
headers=self._session_headers,
timeout=ClientTimeout(total=TIMEOUT.seconds),
cookies=self._jarCookie,
raise_for_status=False,
**kwargs
) as response:
response.raise_for_status()
# Update cookie jar
if self._jarCookie != '':
self._jarCookie.update(response.cookies)
else:
self._jarCookie = response.cookies
try:
if response.status == 204:
res = {'status_code': response.status}
                elif 200 <= response.status < 300:
res = await response.json(loads=json_loads)
else:
res = {}
_LOGGER.debug(f'Not success status code [{response.status}] response: {response}')
if 'X-RateLimit-Remaining' in response.headers:
res['rate_limit_remaining'] = response.headers.get('X-RateLimit-Remaining', '')
            except Exception:
res = {}
_LOGGER.debug(f'Something went wrong [{response.status}] response: {response}')
return res
if self._session_fulldebug:
_LOGGER.debug(f'Request for "{url}" returned with status code [{response.status}], response: {res}')
else:
_LOGGER.debug(f'Request for "{url}" returned with status code [{response.status}]')
return res
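    # Return convention of _request: {'status_code': 204} for empty replies,
    # the parsed JSON body for 2xx (with 'rate_limit_remaining' added when the
    # X-RateLimit-Remaining header is present), and {} when parsing fails;
    # HTTP error statuses are raised by raise_for_status() and handled in get().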
async def get(self, url, vin=''):
"""Perform a get query."""
try:
response = await self._request(METH_GET, self._make_url(url, vin))
return response
except aiohttp.client_exceptions.ClientResponseError as error:
if error.status == 401:
_LOGGER.warning(f'Received "unauthorized" error while fetching data: {error}')
self._session_logged_in = False
elif error.status == 400:
_LOGGER.error(
                    'Got HTTP 400 "Bad Request" from server, this request might be malformed or not implemented correctly for this vehicle')
elif error.status == 500:
_LOGGER.info('Got HTTP 500 from server, service might be temporarily unavailable')
elif error.status == 502:
_LOGGER.info('Got HTTP 502 from server, this request might not be supported for this vehicle')
else:
_LOGGER.error(f'Got unhandled error from server: {error.status}')
return {'status_code': error.status}
async def post(self, url, vin='', **data):
"""Perform a post query."""
if data:
return await self._request(METH_POST, self._make_url(url, vin), **data)
else:
return await self._request(METH_POST, self._make_url(url, vin))
# Construct URL from request, home region and variables
def _make_url(self, ref, vin=''):
        replacedUrl = re.sub(r'\$vin', vin, ref)
if '://' in replacedUrl:
# already server contained in URL
return replacedUrl
elif 'rolesrights' in replacedUrl:
return urljoin(self._session_spin_ref_url, replacedUrl)
else:
return urljoin(self._session_auth_ref_url, replacedUrl)
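    # Illustration (VIN and brand/country values hypothetical):
    #   _make_url('fs-car/bs/vsr/v1/brand/DE/vehicles/$vin/status', vin='WVWZZZ1KZXW000001')
    # substitutes the VIN and joins the ref with self._session_auth_ref_url (the
    # home region resolved by getHomeRegion), refs containing 'rolesrights' join
    # with self._session_spin_ref_url, and absolute URLs pass through unchanged.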
# Update data for all Vehicles
async def update(self):
"""Update status."""
if not self.logged_in:
if not await self._login():
_LOGGER.warning(f'Login for {BRAND} account failed!')
return False
try:
if not await self.validate_tokens:
_LOGGER.info(f'Session expired. Initiating new login for {BRAND} account.')
if not await self.doLogin():
_LOGGER.warning(f'Login for {BRAND} account failed!')
raise Exception(f'Login for {BRAND} account failed')
_LOGGER.debug('Going to call vehicle updates')
            # Get all Vehicle objects and update in parallel
updatelist = []
for vehicle in self.vehicles:
updatelist.append(vehicle.update())
# Wait for all data updates to complete
await asyncio.gather(*updatelist)
return True
except (IOError, OSError, LookupError, Exception) as error:
_LOGGER.warning(f'Could not update information: {error}')
return False
#### Data collect functions ####
async def getHomeRegion(self, vin):
"""Get API requests base url for VIN."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get('https://mal-1a.prd.ece.vwg-connect.com/api/cs/vds/v1/vehicles/$vin/homeRegion', vin)
            uri = response['homeRegion']['baseUri']['content']
            if uri == 'https://mal-1a.prd.ece.vwg-connect.com/api':
                self._session_auth_ref_url = 'https://msg.volkswagen.de'
            else:
                self._session_auth_ref_url = uri.split('/api')[0].replace('mal-', 'fal-')
            self._session_spin_ref_url = uri.split('/api')[0]
            return uri
except Exception as error:
_LOGGER.debug(f'Could not get homeregion, error {error}')
self._session_logged_in = False
return False
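    # Illustration (region value hypothetical): a baseUri of
    # 'https://mal-3a.prd.eu.dp.vwg-connect.com/api' yields
    #   _session_auth_ref_url = 'https://fal-3a.prd.eu.dp.vwg-connect.com'
    #   _session_spin_ref_url = 'https://mal-3a.prd.eu.dp.vwg-connect.com'
    # whereas the default 'https://mal-1a.prd.ece.vwg-connect.com/api' maps the
    # auth ref to 'https://msg.volkswagen.de'.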
async def getOperationList(self, vin):
"""Collect operationlist for VIN, supported/licensed functions."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get('/api/rolesrights/operationlist/v3/vehicles/$vin', vin)
if response.get('operationList', False):
data = response.get('operationList', {})
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch operation list, HTTP status code: {response.get("status_code")}')
data = response
else:
_LOGGER.info(f'Could not fetch operation list: {response}')
data = {'error': 'unknown'}
except Exception as error:
_LOGGER.warning(f'Could not fetch operation list, error: {error}')
data = {'error': 'unknown'}
return data
async def getRealCarData(self, vin):
"""Get car information from customer profile, VIN, nickname, etc."""
if not await self.validate_tokens:
return False
try:
_LOGGER.debug("Attempting extraction of subject from identity token.")
atoken = self._session_tokens['identity']['access_token']
subject = jwt.decode(atoken, options={"verify_signature": False}, algorithms=JWT_ALGORITHMS).get('sub', None)
await self.set_token('identity')
self._session_headers['Accept'] = 'application/json'
response = await self.get(
f'https://customer-profile.apps.emea.vwapps.io/v1/customers/{subject}/realCarData'
)
if response.get('realCars', {}):
data = {
'carData': next(
item for item in response.get('realCars', []) if item['vehicleIdentificationNumber'] == vin)
}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch realCarData, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch realcar data')
except Exception as error:
_LOGGER.warning(f'Could not fetch realCarData, error: {error}')
return False
async def getCarportData(self, vin):
"""Get carport data for vehicle, model, model year etc."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
self._session_headers[
'Accept'] = 'application/vnd.vwg.mbb.vehicleDataDetail_v2_1_0+json, application/vnd.vwg.mbb.genericError_v1_0_2+json'
response = await self.get(
f'fs-car/vehicleMgmt/vehicledata/v2/{BRAND}/{self._session_country}/vehicles/$vin',
vin=vin
)
self._session_headers['Accept'] = 'application/json'
if response.get('vehicleDataDetail', {}).get('carportData', {}):
data = {
'carportData': response.get('vehicleDataDetail', {}).get('carportData', {})
}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch carportdata, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch carport data')
except Exception as error:
_LOGGER.warning(f'Could not fetch carportData, error: {error}')
return False
async def getVehicleStatusData(self, vin):
"""Get stored vehicle data response."""
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/vsr/v1/{BRAND}/{self._session_country}/vehicles/$vin/status',
vin=vin
)
            if response.get('StoredVehicleDataResponse', {}).get('vehicleData', {}).get('data', [{}])[0].get('field', [{}])[0]:
data = {
'StoredVehicleDataResponse': response.get('StoredVehicleDataResponse', {}),
                    'StoredVehicleDataResponseParsed': {
                        e['id']: e if 'value' in e else ''
                        for s in response['StoredVehicleDataResponse']['vehicleData']['data']
                        for e in s['field']
                    }
}
return data
elif response.get('status_code', {}):
_LOGGER.warning(
f'Could not fetch vehicle status report, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch status data')
except Exception as error:
_LOGGER.warning(f'Could not fetch StoredVehicleDataResponse, error: {error}')
return False
async def getTripStatistics(self, vin):
"""Get short term trip statistics."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/tripstatistics/v1/{BRAND}/{self._session_country}/vehicles/$vin/tripdata/shortTerm?newest',
vin=vin
)
if response.get('tripData', {}):
data = {'tripstatistics': response.get('tripData', {})}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch trip statistics, HTTP status code: {response.get("status_code")}')
else:
                _LOGGER.info('Unhandled error while trying to fetch trip statistics')
except Exception as error:
_LOGGER.warning(f'Could not fetch trip statistics, error: {error}')
return False
async def getPosition(self, vin):
"""Get position data."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/cf/v1/{BRAND}/{self._session_country}/vehicles/$vin/position',
vin=vin
)
if response.get('findCarResponse', {}):
data = {
'findCarResponse': response.get('findCarResponse', {}),
'isMoving': False
}
return data
elif response.get('status_code', {}):
if response.get('status_code', 0) == 204:
                    _LOGGER.debug('Seems car is moving, HTTP 204 received from position')
data = {
'isMoving': True,
'rate_limit_remaining': 15
}
return data
else:
_LOGGER.warning(f'Could not fetch position, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch positional data')
except Exception as error:
_LOGGER.warning(f'Could not fetch position, error: {error}')
return False
async def getTimers(self, vin):
"""Get departure timers."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/departuretimer/v1/{BRAND}/{self._session_country}/vehicles/$vin/timer',
vin=vin
)
if response.get('timer', {}):
data = {'timers': response.get('timer', {})}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch timers, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unknown error while trying to fetch data for departure timers')
except Exception as error:
_LOGGER.warning(f'Could not fetch timers, error: {error}')
return False
async def getClimater(self, vin):
"""Get climatisation data."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/climatisation/v1/{BRAND}/{self._session_country}/vehicles/$vin/climater',
vin=vin
)
if response.get('climater', {}):
data = {'climater': response.get('climater', {})}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch climatisation, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch climatisation data')
except Exception as error:
_LOGGER.warning(f'Could not fetch climatisation, error: {error}')
return False
async def getCharger(self, vin):
"""Get charger data."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/batterycharge/v1/{BRAND}/{self._session_country}/vehicles/$vin/charger',
vin=vin
)
if response.get('charger', {}):
data = {'charger': response.get('charger', {})}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch pre-heating, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch charger data')
except Exception as error:
_LOGGER.warning(f'Could not fetch charger, error: {error}')
return False
async def getPreHeater(self, vin):
"""Get parking heater data."""
if not await self.validate_tokens:
return False
try:
await self.set_token('vwg')
response = await self.get(
f'fs-car/bs/rs/v1/{BRAND}/{self._session_country}/vehicles/$vin/status',
vin=vin
)
if response.get('statusResponse', {}):
data = {'heating': response.get('statusResponse', {})}
return data
elif response.get('status_code', {}):
_LOGGER.warning(f'Could not fetch pre-heating, HTTP status code: {response.get("status_code")}')
else:
_LOGGER.info('Unhandled error while trying to fetch pre-heating data')
except Exception as error:
_LOGGER.warning(f'Could not fetch pre-heating, error: {error}')
return False
async def get_request_status(self, vin, sectionId, requestId):
"""Return status of a request ID for a given section ID."""
        if not self.logged_in:
if not await self.doLogin():
_LOGGER.warning(f'Login for {BRAND} account failed!')
raise Exception(f'Login for {BRAND} account failed')
try:
if not await self.validate_tokens:
_LOGGER.info(f'Session expired. Initiating new login for {BRAND} account.')
if not await self.doLogin():
_LOGGER.warning(f'Login for {BRAND} account failed!')
raise Exception(f'Login for {BRAND} account failed')
await self.set_token('vwg')
if sectionId == 'climatisation':
url = f'fs-car/bs/$sectionId/v1/{BRAND}/{self._session_country}/vehicles/$vin/climater/actions/$requestId'
elif sectionId == 'batterycharge':
url = f'fs-car/bs/$sectionId/v1/{BRAND}/{self._session_country}/vehicles/$vin/charger/actions/$requestId'
elif sectionId == 'departuretimer':
url = f'fs-car/bs/$sectionId/v1/{BRAND}/{self._session_country}/vehicles/$vin/timer/actions/$requestId'
elif sectionId == 'vsr':
url = f'fs-car/bs/$sectionId/v1/{BRAND}/{self._session_country}/vehicles/$vin/requests/$requestId/jobstatus'
else:
url = f'fs-car/bs/$sectionId/v1/{BRAND}/{self._session_country}/vehicles/$vin/requests/$requestId/status'
            url = re.sub(r'\$sectionId', sectionId, url)
            url = re.sub(r'\$requestId', requestId, url)
response = await self.get(url, vin)
            # Parking heater and other endpoints that answer with requestStatusResponse
if response.get('requestStatusResponse', {}).get('status', False):
result = response.get('requestStatusResponse', {}).get('status', False)
# For electric charging, climatisation and departure timers
elif response.get('action', {}).get('actionState', False):
result = response.get('action', {}).get('actionState', False)
else:
result = 'Unknown'
# Translate status messages to meaningful info
            if result in ('request_in_progress', 'queued', 'fetched'):
                status = 'In progress'
            elif result in ('request_fail', 'failed'):
                status = 'Failed'
            elif result == 'unfetched':
                status = 'No response'
            elif result in ('request_successful', 'succeeded'):
                status = 'Success'
else:
status = result
return status
except Exception as error:
_LOGGER.warning(f'Failure during get request status: {error}')
raise Exception(f'Failure during get request status: {error}')
async def get_sec_token(self, vin, spin, action):
"""Get a security token, required for certain set functions."""
urls = {
'lock': '/api/rolesrights/authorization/v2/vehicles/$vin/services/rlu_v1/operations/LOCK/security-pin-auth-requested',
'unlock': '/api/rolesrights/authorization/v2/vehicles/$vin/services/rlu_v1/operations/UNLOCK/security-pin-auth-requested',
'heating': '/api/rolesrights/authorization/v2/vehicles/$vin/services/rheating_v1/operations/P_QSACT/security-pin-auth-requested',
'timer': '/api/rolesrights/authorization/v2/vehicles/$vin/services/timerprogramming_v1/operations/P_SETTINGS_AU/security-pin-auth-requested',
'rclima': '/api/rolesrights/authorization/v2/vehicles/$vin/services/rclima_v1/operations/P_START_CLIMA_AU/security-pin-auth-requested'
}
if not spin:
raise Exception('SPIN is required')
try:
if not urls.get(action, False):
raise Exception(f'Security token for "{action}" is not implemented')
response = await self.get(self._make_url(urls.get(action), vin=vin))
secToken = response['securityPinAuthInfo']['securityToken']
challenge = response['securityPinAuthInfo']['securityPinTransmission']['challenge']
spinHash = self.hash_spin(challenge, spin)
body = {
'securityPinAuthentication': {
'securityPin': {
'challenge': challenge,
'securityPinHash': spinHash
},
'securityToken': secToken
}
}
self._session_headers['Content-Type'] = 'application/json'
response = await self.post(
self._make_url('/api/rolesrights/authorization/v2/security-pin-auth-completed', vin=vin), json=body)
self._session_headers.pop('Content-Type', None)
if response.get('securityToken', False):
return response['securityToken']
else:
_LOGGER.warning('Did not receive a valid security token')
raise Exception('Did not receive a valid security token')
except Exception as error:
_LOGGER.error(f'Could not generate security token (maybe wrong SPIN?), error: {error}')
raise
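    # The SPIN handshake above is two steps:
    #   1. GET  .../security-pin-auth-requested  -> securityToken + challenge
    #   2. POST .../security-pin-auth-completed with the challenge and
    #      hash_spin(challenge, spin) wrapped in 'securityPinAuthentication'
    # The returned securityToken is then sent in the X-mbbSecToken /
    # X-securityToken headers by the set* functions below.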
#### Data set functions ####
async def dataCall(self, query, vin='', **data):
"""Function to execute actions through VW-Group API."""
        if not self.logged_in:
if not await self.doLogin():
_LOGGER.warning(f'Login for {BRAND} account failed!')
raise Exception(f'Login for {BRAND} account failed')
try:
if not await self.validate_tokens:
_LOGGER.info(f'Session expired. Initiating new login for {BRAND} account.')
if not await self.doLogin():
_LOGGER.warning(f'Login for {BRAND} account failed!')
raise Exception(f'Login for {BRAND} account failed')
response = await self.post(query, vin=vin, **data)
_LOGGER.debug(f'Data call returned: {response}')
return response
except aiohttp.client_exceptions.ClientResponseError as error:
if error.status == 401:
_LOGGER.error('Unauthorized')
self._session_logged_in = False
elif error.status == 400:
                _LOGGER.error('Bad request')
elif error.status == 429:
                _LOGGER.warning(
                    "Too many requests. Further requests can only be made after the end of the next trip in order to protect your vehicle's battery.")
return 429
elif error.status == 500:
_LOGGER.error('Internal server error, server might be temporarily unavailable')
elif error.status == 502:
_LOGGER.error('Bad gateway, this function may not be implemented for this vehicle')
else:
_LOGGER.error(f'Unhandled HTTP exception: {error}')
# return False
except Exception as error:
_LOGGER.error(f'Failure to execute: {error}')
return False
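    # dataCall return convention, relied upon by the set* helpers below: the
    # parsed JSON dict on success, the bare integer 429 when the backend
    # throttles the account, and False on any other failure.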
async def setRefresh(self, vin):
""""Force vehicle data update."""
try:
await self.set_token('vwg')
response = await self.dataCall(f'fs-car/bs/vsr/v1/{BRAND}/{self._session_country}/vehicles/$vin/requests', vin, data=None)
if not response:
raise Exception('Invalid or no response')
elif response == 429:
return dict({'id': None, 'state': 'Throttled', 'rate_limit_remaining': 0})
else:
request_id = response.get('CurrentVehicleDataResponse', {}).get('requestId', 0)
request_state = response.get('CurrentVehicleDataResponse', {}).get('requestState', 'queued')
remaining = response.get('rate_limit_remaining', -1)
_LOGGER.debug(
f'Request to refresh data returned with state "{request_state}", request id: {request_id}, remaining requests: {remaining}')
return dict({'id': str(request_id), 'state': request_state, 'rate_limit_remaining': remaining})
        except Exception:
            raise
async def setCharger(self, vin, data):
"""Start/Stop charger."""
try:
await self.set_token('vwg')
response = await self.dataCall(
f'fs-car/bs/batterycharge/v1/{BRAND}/{self._session_country}/vehicles/$vin/charger/actions', vin, json=data)
if not response:
raise Exception('Invalid or no response')
elif response == 429:
return dict({'id': None, 'state': 'Throttled', 'rate_limit_remaining': 0})
else:
request_id = response.get('action', {}).get('actionId', 0)
request_state = response.get('action', {}).get('actionState', 'unknown')
remaining = response.get('rate_limit_remaining', -1)
_LOGGER.debug(
f'Request for charger action returned with state "{request_state}", request id: {request_id}, remaining requests: {remaining}')
return dict({'id': str(request_id), 'state': request_state, 'rate_limit_remaining': remaining})
        except Exception:
            raise
async def setClimater(self, vin, data, spin):
"""Execute climatisation actions."""
try:
await self.set_token('vwg')
# Only get security token if auxiliary heater is to be started
if data.get('action', {}).get('settings', {}).get('heaterSource', None) == 'auxiliary':
self._session_headers['X-securityToken'] = await self.get_sec_token(vin=vin, spin=spin, action='rclima')
response = await self.dataCall(
f'fs-car/bs/climatisation/v1/{BRAND}/{self._session_country}/vehicles/$vin/climater/actions', vin, json=data)
self._session_headers.pop('X-securityToken', None)
if not response:
raise Exception('Invalid or no response')
elif response == 429:
return dict({'id': None, 'state': 'Throttled', 'rate_limit_remaining': 0})
else:
request_id = response.get('action', {}).get('actionId', 0)
request_state = response.get('action', {}).get('actionState', 'unknown')
remaining = response.get('rate_limit_remaining', -1)
_LOGGER.debug(
f'Request for climater action returned with state "{request_state}", request id: {request_id}, remaining requests: {remaining}')
return dict({'id': str(request_id), 'state': request_state, 'rate_limit_remaining': remaining})
        except Exception:
            self._session_headers.pop('X-securityToken', None)
            raise
async def setPreHeater(self, vin, data, spin):
"""Petrol/diesel parking heater actions."""
try:
await self.set_token('vwg')
            contType = self._session_headers.get('Content-Type', '')
self._session_headers['Content-Type'] = 'application/vnd.vwg.mbb.RemoteStandheizung_v2_0_2+json'
            if 'quickstop' not in data:
self._session_headers['x-mbbSecToken'] = await self.get_sec_token(vin=vin, spin=spin, action='heating')
response = await self.dataCall(f'fs-car/bs/rs/v1/{BRAND}/{self._session_country}/vehicles/$vin/action', vin=vin,
json=data)
# Clean up headers
self._session_headers.pop('x-mbbSecToken', None)
self._session_headers.pop('Content-Type', None)
            if contType:
                self._session_headers['Content-Type'] = contType
if not response:
raise Exception('Invalid or no response')
elif response == 429:
return dict({'id': None, 'state': 'Throttled', 'rate_limit_remaining': 0})
else:
request_id = response.get('performActionResponse', {}).get('requestId', 0)
remaining = response.get('rate_limit_remaining', -1)
_LOGGER.debug(
f'Request for parking heater is queued with request id: {request_id}, remaining requests: {remaining}')
return dict({'id': str(request_id), 'state': None, 'rate_limit_remaining': remaining})
        except Exception:
            self._session_headers.pop('x-mbbSecToken', None)
            self._session_headers.pop('Content-Type', None)
            if contType:
                self._session_headers['Content-Type'] = contType
            raise
async def setLock(self, vin, data, spin):
"""Remote lock and unlock actions."""
try:
await self.set_token('vwg')
# Prepare data, headers and fetch security token
            contType = self._session_headers.get('Content-Type', '')
if 'unlock' in data:
self._session_headers['X-mbbSecToken'] = await self.get_sec_token(vin=vin, spin=spin, action='unlock')
else:
self._session_headers['X-mbbSecToken'] = await self.get_sec_token(vin=vin, spin=spin, action='lock')
self._session_headers['Content-Type'] = 'application/vnd.vwg.mbb.RemoteLockUnlock_v1_0_0+xml'
response = await self.dataCall(f'fs-car/bs/rlu/v1/{BRAND}/{self._session_country}/vehicles/$vin/actions', vin, data=data)
# Clean up headers
self._session_headers.pop('X-mbbSecToken', None)
self._session_headers.pop('Content-Type', None)
            if contType:
                self._session_headers['Content-Type'] = contType
if not response:
raise Exception('Invalid or no response')
elif response == 429:
return dict({'id': None, 'state': 'Throttled', 'rate_limit_remaining': 0})
else:
request_id = response.get('rluActionResponse', {}).get('requestId', 0)
request_state = response.get('rluActionResponse', {}).get('requestId', 'unknown')
remaining = response.get('rate_limit_remaining', -1)
_LOGGER.debug(
f'Request for lock action returned with state "{request_state}", request id: {request_id}, remaining requests: {remaining}')
return dict({'id': str(request_id), 'state': request_state, 'rate_limit_remaining': remaining})
        except Exception:
            self._session_headers.pop('X-mbbSecToken', None)
            self._session_headers.pop('Content-Type', None)
            if contType:
                self._session_headers['Content-Type'] = contType
            raise
#### Token handling ####
@property
async def validate_tokens(self):
"""Function to validate expiry of tokens."""
idtoken = self._session_tokens['identity']['id_token']
atoken = self._session_tokens['vwg']['access_token']
id_exp = jwt.decode(idtoken, options={"verify_signature": False, 'verify_aud': False}, algorithms=JWT_ALGORITHMS).get('exp', None)
at_exp = jwt.decode(atoken, options={"verify_signature": False, 'verify_aud': False}, algorithms=JWT_ALGORITHMS).get('exp', None)
id_dt = datetime.fromtimestamp(int(id_exp))
at_dt = datetime.fromtimestamp(int(at_exp))
now = datetime.now()
later = now + self._session_refresh_interval
# Check if tokens have expired, or expires now
if now >= id_dt or now >= at_dt:
_LOGGER.debug('Tokens have expired. Try to fetch new tokens.')
if await self.refresh_tokens():
_LOGGER.debug('Successfully refreshed tokens')
else:
return False
# Check if tokens expires before next update
elif later >= id_dt or later >= at_dt:
_LOGGER.debug('Tokens about to expire. Try to fetch new tokens.')
if await self.refresh_tokens():
_LOGGER.debug('Successfully refreshed tokens')
else:
return False
return True
async def verify_tokens(self, token, type, client='Legacy'):
"""Function to verify JWT against JWK(s)."""
if type == 'identity':
req = await self._session.get(url='https://identity.vwgroup.io/oidc/v1/keys')
keys = await req.json()
audience = [
CLIENT[client].get('CLIENT_ID'),
'VWGMBB01DELIV1',
'https://api.vas.eu.dp15.vwg-connect.com',
'https://api.vas.eu.wcardp.io'
]
elif type == 'vwg':
req = await self._session.get(url='https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/public/jwk/v1')
keys = await req.json()
audience = 'mal.prd.ece.vwg-connect.com'
else:
_LOGGER.debug('Not implemented')
return False
try:
pubkeys = {}
for jwk in keys['keys']:
kid = jwk['kid']
if jwk['kty'] == 'RSA':
pubkeys[kid] = jwt.algorithms.RSAAlgorithm.from_jwk(to_json(jwk))
token_kid = jwt.get_unverified_header(token)['kid']
if type == 'vwg':
token_kid = 'VWGMBB01DELIV1.' + token_kid
pubkey = pubkeys[token_kid]
payload = jwt.decode(token, key=pubkey, algorithms=JWT_ALGORITHMS, audience=audience)
return True
except Exception as error:
_LOGGER.debug(f'Failed to verify token, error: {error}')
return False
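    # Verification sketch: the fetched JWKS is reduced to a {kid: RSA public key}
    # map, the token's unverified header selects the key by 'kid' (prefixed with
    # 'VWGMBB01DELIV1.' for VWG tokens), and jwt.decode() then validates both the
    # signature and the expected audience.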
async def refresh_tokens(self):
"""Function to refresh tokens."""
try:
tHeaders = {
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': USER_AGENT,
'X-App-Version': XAPPVERSION,
'X-App-Name': XAPPNAME,
'X-Client-Id': XCLIENT_ID
}
body = {
'grant_type': 'refresh_token',
'brand': BRAND,
'refresh_token': self._session_tokens['identity']['refresh_token']
}
response = await self._session.post(
url='https://tokenrefreshservice.apps.emea.vwapps.io/refreshTokens',
headers=tHeaders,
data=body
)
if response.status == 200:
tokens = await response.json()
# Verify Token
if not await self.verify_tokens(tokens['id_token'], 'identity'):
_LOGGER.warning('Token could not be verified!')
                self._session_tokens['identity'].update(tokens)
else:
_LOGGER.warning(f'Something went wrong when refreshing {BRAND} account tokens.')
return False
body = {
'grant_type': 'id_token',
'scope': 'sc2:fal',
'token': self._session_tokens['identity']['id_token']
}
response = await self._session.post(
url='https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/token',
headers=tHeaders,
data=body,
allow_redirects=True
)
if response.status == 200:
tokens = await response.json()
if not await self.verify_tokens(tokens['access_token'], 'vwg'):
_LOGGER.warning('Token could not be verified!')
                self._session_tokens['vwg'].update(tokens)
else:
resp = await response.text()
                _LOGGER.warning(f'Something went wrong when refreshing API tokens. {resp}')
return False
return True
except Exception as error:
_LOGGER.warning(f'Could not refresh tokens: {error}')
return False
async def set_token(self, type):
"""Switch between tokens."""
self._session_headers['Authorization'] = 'Bearer ' + self._session_tokens[type]['access_token']
return
#### Class helpers ####
@property
def vehicles(self):
"""Return list of Vehicle objects."""
return self._vehicles
@property
def logged_in(self):
return self._session_logged_in
def vehicle(self, vin):
"""Return vehicle object for given vin."""
return next(
(
vehicle
for vehicle in self.vehicles
if vehicle.unique_id.lower() == vin.lower()
), None
)
def hash_spin(self, challenge, spin):
"""Convert SPIN and challenge to hash."""
        spinArray = bytearray.fromhex(spin)
        byteChallenge = bytearray.fromhex(challenge)
spinArray.extend(byteChallenge)
return hashlib.sha512(spinArray).hexdigest()
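    # Worked example (values hypothetical): with spin='1234' and challenge
    # '6fa00a61', both strings are parsed as hex and concatenated spin||challenge:
    #   hashlib.sha512(bytes.fromhex('1234') + bytes.fromhex('6fa00a61')).hexdigest()
    # Note that bytearray.fromhex() requires a hex-parseable S-PIN (digits 0-9 work).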
@property
async def validate_login(self):
try:
if not await self.validate_tokens:
return False
return True
except (IOError, OSError) as error:
_LOGGER.warning('Could not validate login: %s', error)
return False
async def main():
"""Main method."""
if '-v' in argv:
logging.basicConfig(level=logging.INFO)
elif '-vv' in argv:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
async with ClientSession(headers={'Connection': 'keep-alive'}) as session:
connection = Connection(session, **read_config())
if await connection.doLogin():
if await connection.update():
for vehicle in connection.vehicles:
print(f'Vehicle id: {vehicle}')
print('Supported sensors:')
for instrument in vehicle.dashboard().instruments:
print(f' - {instrument.name} (domain:{instrument.component}) - {instrument.str_state}')
if __name__ == '__main__':
    asyncio.run(main())
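# Usage sketch (file name hypothetical; read_config() must supply the account
# credentials expected by Connection):
#   python connection.py        # errors only
#   python connection.py -v     # info logging
#   python connection.py -vv    # debug logging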
avg_line_length: 47.336965 | max_line_length: 174 | alphanum_fraction: 0.563047
hexsha: 72e542fa40470107db4b9fc30dd4704d62871fb3 | size: 532 | ext: py | lang: Python
max_stars_repo_path: tests/extract_errors_test.py | max_stars_repo_name: Guy-Markman/Wikipedia-API | max_stars_repo_head_hexsha: 240128d72d92501a3019182fa4380bcaae373453 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 323 | max_stars_repo_stars_event_min_datetime: 2017-12-11T19:38:46.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-28T19:25:44.000Z
max_issues_repo_path: tests/extract_errors_test.py | max_issues_repo_name: Guy-Markman/Wikipedia-API | max_issues_repo_head_hexsha: 240128d72d92501a3019182fa4380bcaae373453 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 46 | max_issues_repo_issues_event_min_datetime: 2017-12-16T01:22:47.000Z | max_issues_repo_issues_event_max_datetime: 2022-01-25T09:57:04.000Z
max_forks_repo_path: tests/extract_errors_test.py | max_forks_repo_name: Guy-Markman/Wikipedia-API | max_forks_repo_head_hexsha: 240128d72d92501a3019182fa4380bcaae373453 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 61 | max_forks_repo_forks_event_min_datetime: 2018-01-11T03:36:25.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-28T05:55:49.000Z
# -*- coding: utf-8 -*-
import unittest
from tests.mock_data import wikipedia_api_request
import wikipediaapi
class TestErrorsExtracts(unittest.TestCase):
def setUp(self):
self.wiki = wikipediaapi.Wikipedia("en")
self.wiki._query = wikipedia_api_request
def test_title_before_fetching(self):
page = self.wiki.page('NonExisting')
self.assertEqual(page.title, 'NonExisting')
def test_pageid(self):
page = self.wiki.page('NonExisting')
self.assertEqual(page.pageid, -1)
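# Hypothetical convenience entry point (not part of the original test module);
# the suite can equally be run with `python -m unittest tests.extract_errors_test`:
if __name__ == '__main__':
    unittest.main()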
avg_line_length: 26.6 | max_line_length: 51 | alphanum_fraction: 0.695489