#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from lib.plugins import PluginBase
from lib.irma.common.utils import IrmaProbeType
class AntivirusFormatterPlugin(PluginBase):
# =================
# plugin metadata
# =================
_plugin_name_ = "AntivirusDefault"
_plugin_display_name_ = _plugin_name_
_plugin_author_ = "IRMA (c) Quarkslab"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.antivirus
_plugin_description_ = "Default Formatter for Antivirus category"
_plugin_dependencies_ = []
# ===========
# Formatter
# ===========
@staticmethod
def can_handle_results(raw_result):
# New probe result format (from version 1.0.4)
return raw_result.get('type', None) == IrmaProbeType.antivirus
@staticmethod
def format(raw_result):
# New probe result format (from version 1.0.4)
# As the raw_result has almost the same structure as the json for the
# output, we simply delete antivirus specific fields and we return the
# object
raw_result.pop('database', None)
return raw_result
import numpy as np
from scipy import ndimage
__all__ = ['gabor_kernel', 'gabor_filter']
def _sigma_prefactor(bandwidth):
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2)/2.0) * (2.0**b + 1) / (2.0**b - 1)
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None,
offset=0):
"""Return complex 2D Gabor filter kernel.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g : complex array
Complex filter kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
"""
if sigma_x is None:
sigma_x = _sigma_prefactor(bandwidth) / frequency
if sigma_y is None:
sigma_y = _sigma_prefactor(bandwidth) / frequency
n_stds = 3
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0+1, -x0:x0+1]
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
    g = np.zeros(y.shape, dtype=complex)
g[:] = np.exp(-0.5 * (rotx**2 / sigma_x**2 + roty**2 / sigma_y**2))
g /= 2 * np.pi * sigma_x * sigma_y
g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))
return g
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,
sigma_y=None, offset=0, mode='reflect', cval=0):
"""Return real and imaginary responses to Gabor filter.
The real and imaginary parts of the Gabor filter kernel are applied to the
image and the response is returned as a pair of arrays.
Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system. It is especially suitable for texture
classification using Gabor filter banks.
Parameters
----------
image : array
Input image.
frequency : float
Frequency of the harmonic function.
theta : float
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
real, imag : arrays
Filtered images using the real and imaginary parts of the Gabor filter
kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
"""
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)
filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)
return filtered_real, filtered_imag
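# A short usage sketch (not part of the original module; the input here is a
# synthetic random texture): filter the image at two orientations.
if __name__ == '__main__':
    image = np.random.rand(64, 64)
    real_0, imag_0 = gabor_filter(image, frequency=0.2, theta=0)
    real_90, imag_90 = gabor_filter(image, frequency=0.2, theta=np.pi / 2)
    print(real_0.shape, real_90.shape)  # each response has the same shape as the input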
# repository: Rousan99/Azazaya
from manim import *
class Introduction(Scene):
config.background_color = "#1b1b1b"
def construct(self):
title = Title("Area of Dodecagon - Proof without talk").set_color_by_gradient(RED,ORANGE,YELLOW,GREEN,BLUE)
title.scale(0.7)
self.play(Write(title))
self.wait(2)
mean = Tex("Dodecagon $\\to$ $12$ sided regular polygon").set_color(RED)
self.play(Write(mean),run_time=2)
self.wait(5)
plane = NumberPlane(x_range=[-3.5,3.5],y_range=[-3.3,3.3],background_line_style={
"stroke_color": TEAL,
"stroke_opacity": 0.1
})
plane.shift(0.5*DOWN)
SS = SurroundingRectangle(plane,buff=0)
self.play(ReplacementTransform(mean,VGroup(plane,SS)),run_time=2)
self.wait(4)
cir = Circle(radius=3,color=BLUE).move_to(plane.c2p(0,0))
self.play(Write(cir),run_time=2)
self.wait(3)
arrow = DoubleArrow(plane.c2p(0,0),plane.c2p(3,0),buff=0).set_color(GREEN)
arr_tex = MathTex("r").set_color(YELLOW)
arr_tex.next_to(arrow,DOWN)
self.play(Write(VGroup(arrow,arr_tex)),run_time=2)
self.wait(6)
radius_val = Tex("radius = r").set_color(YELLOW).to_edge(LEFT).shift(2*UP)
self.play(ReplacementTransform(VGroup(arr_tex,arrow),radius_val),run_time=2)
self.wait(3)
gen_eq = MathTex("x_n = r\\cdot e^{\\frac{2\\pi i k}{12}}").set_color(BLUE)
        con = MathTex("k\\in \\{0,1,2,\\cdots ,10,11\\}").scale(0.6)
gen_eq.next_to(radius_val,DOWN)
con.next_to(gen_eq,DOWN)
self.play(FadeIn(VGroup(gen_eq,con)),run_time=2)
self.wait(4)
x_cod = [3*np.cos(2*PI*i/12) for i in range(12)]
y_cod = [3*np.sin(2*PI*i/12) for i in range(12)]
points = VGroup()
cen = Dot(radius=0.07, point = plane.c2p(0,0),color=GREEN,z_index=4)
for i in range(12):
dot = Dot(radius=0.07, point = plane.c2p(x_cod[i],y_cod[i]),color=YELLOW,z_index=4)
points.add(dot)
self.play(ReplacementTransform(VGroup(gen_eq,con),points),FadeIn(cen),run_time=2)
self.wait(5)
poly = RegularPolygon(n=12,color=GREEN).move_to(plane.c2p(0,0))
poly.scale(3)
self.play(Write(poly),run_time=2)
self.wait(3)
self.play(FadeOut(cir))
self.wait(2)
direct_line_index = [0,2,3,4,6,7,8,10,11]
tri_index = [0,2,4,6,8,10]
dir_lines = VGroup()
for i in direct_line_index:
line = Line(plane.c2p(0,0),points[i],color=RED,z_index=2)
dir_lines.add(line)
self.play(Write(dir_lines),run_time=5)
self.wait(5)
tri_line = VGroup()
for i in [0,2,4]:
line = Line(points[tri_index[i]],points[tri_index[i+1]],color=PURPLE,z_index=2)
tri_line.add(line)
self.play(Write(tri_line),run_time=3)
self.wait(8)
class pasrt_2(MovingCameraScene):
config.background_color = "#1b1b1b"
def construct(self):
plane = NumberPlane(x_range=[-3.5,3.5],y_range=[-3.3,3.3],background_line_style={
"stroke_color": TEAL,
"stroke_opacity": 0.1
})
plane.shift(3.5*LEFT)
ss = SurroundingRectangle(plane,buff=0)
self.add(plane,ss)
x_cod = [3*np.cos(2*PI*i/12) for i in range(12)]
y_cod = [3*np.sin(2*PI*i/12) for i in range(12)]
points = VGroup()
cen = Dot(radius=0.07, point = plane.c2p(0,0),color=GREEN,z_index=4)
for i in range(12):
dot = Dot(radius=0.07, point = plane.c2p(x_cod[i],y_cod[i]),color=YELLOW,z_index=4)
points.add(dot)
poly = RegularPolygon(n=12,color=GREEN).move_to(plane.c2p(0,0))
poly.scale(3)
self.add(points,cen,poly)
direct_line_index = [0,2,3,4,6,7,8,10,11]
tri_index = [0,2,4,6,8,10]
dir_lines = VGroup()
for i in direct_line_index:
line = Line(plane.c2p(0,0),points[i],color=RED,z_index=2)
dir_lines.add(line)
tri_line = VGroup()
for i in [0,2,4]:
line = Line(points[tri_index[i]],points[tri_index[i+1]],color=PURPLE,z_index=2)
tri_line.add(line)
self.add(dir_lines,tri_line)
o = cen.get_center()
pol1 = Polygon(o,points[0].get_center(),points[2].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol2 = Polygon(points[0].get_center(),points[1].get_center(),points[2].get_center(),color=PURPLE,fill_opacity=0.2,z_index=3,stroke_width=2)
pol3 = Polygon(o,points[2].get_center(),points[3].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol4 = Polygon(o,points[3].get_center(),points[4].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol5 = Polygon(o,points[4].get_center(),points[6].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol6 = Polygon(points[4].get_center(),points[5].get_center(),points[6].get_center(),color=PURPLE,fill_opacity=0.2,z_index=3,stroke_width=2)
pol7 = Polygon(o,points[6].get_center(),points[7].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol8 = Polygon(o,points[7].get_center(),points[8].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol9 = Polygon(o,points[8].get_center(),points[10].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol10 = Polygon(points[8].get_center(),points[9].get_center(),points[10].get_center(),color=PURPLE,fill_opacity=0.2,z_index=3,stroke_width=2)
pol11 = Polygon(o,points[10].get_center(),points[11].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
pol12 = Polygon(o,points[11].get_center(),points[0].get_center(),color=RED,fill_opacity=0.2,stroke_width=2)
polly = VGroup(pol1,pol2,pol3,pol4,pol5,pol6,pol7,pol8,pol9,pol10,pol11,pol12)
self.play(Write(polly),run_time=2)
self.wait(4)
self.play(Uncreate(VGroup(plane,ss)),FadeOut(points),FadeOut(VGroup(cen,poly)),FadeOut(tri_line),FadeOut(dir_lines),run_time=2)
self.wait()
first_shift = VGroup(pol1,pol2,pol7,pol8)
second_shift = VGroup(pol3,pol4,pol9,pol10)
self.play(second_shift.animate.shift(4*RIGHT),run_time=2)
self.wait(4)
self.play(self.camera.frame.animate.scale(1.3),run_time=2)
self.play(self.camera.frame.animate.shift(RIGHT),run_time=2)
self.wait(2)
self.play(first_shift.animate.shift(9*RIGHT),run_time=2)
self.wait(5)
# self.remove(pol12)
self.play(pol12.animate.shift(dir_lines[4].get_vector()*1.016))
self.wait(2)
self.play(pol11.animate.shift(dir_lines[3].get_vector()*1.016))
self.wait(2)
small_tri_shift = VGroup(
Line(points[5].get_center(),o),
Line(o,points[9].get_center()),
Line(o,points[1].get_center())
)
self.play(pol6.animate.shift(small_tri_shift[0].get_vector()*1.016))
self.wait(4)
self.play(pol3.animate.shift(dir_lines[6].get_vector()*1.016),
pol4.animate.shift(dir_lines[7].get_vector()*1.016),
pol10.animate.shift(small_tri_shift[1].get_vector()*-1),
run_time=2)
self.wait(4)
self.play(pol7.animate.shift(dir_lines[0].get_vector()*1.016),
pol8.animate.shift(dir_lines[1].get_vector()*1.016),
pol2.animate.shift(small_tri_shift[2].get_vector()*-1.016),
run_time=2)
self.wait(4)
# self.add(NumberPlane())
arr = DoubleArrow([-1,-2.9,0],[2.01,-2.9,0],buff=0).set_color(BLUE)
arr_tex = Tex("r").set_color(YELLOW)
arr_tex.next_to(arr,DOWN)
self.play(Write(arr),FadeIn(arr_tex),run_time=2)
self.wait(4)
block1 = VGroup(pol12,pol11,pol6,pol5)
block2 = VGroup(pol7,pol8,pol2,pol1)
block3 = VGroup(pol3,pol4,pol10,pol9)
rot_fact = PI/6
self.play(Rotating(block1,radians=rot_fact),
Rotating(block2,radians=-rot_fact),
run_time=2)
self.wait()
self.play(block1.animate.next_to(block3,LEFT,buff=0),
block2.animate.next_to(block3,RIGHT,buff=0),run_time=2)
self.wait(2)
squ = Rectangle(height=3,width=9.1,z_index=6,stroke_width=5).move_to(block3.get_center())
self.play(Write(squ),run_time=2)
self.wait(4)
arr1 = DoubleArrow(squ.get_vertices()[0],squ.get_vertices()[1],buff=-0.1,color=YELLOW).shift(0.2*UP)
arr1_val = MathTex("3r").next_to(arr1,UP)
self.play(Write(VGroup(arr1,arr1_val)),run_time=2)
self.wait(2)
arr2 = DoubleArrow(squ.get_vertices()[1],squ.get_vertices()[2],buff=-0.1,color=YELLOW).shift(0.2*LEFT)
arr2_val = MathTex("r").next_to(arr2,LEFT)
self.play(Write(VGroup(arr2,arr2_val)),run_time=2)
self.wait(7)
eqs = MathTex("=").rotate(PI/2)
eqs.next_to(arr1_val,UP)
self.play(Write(eqs))
self.wait()
self.play(self.camera.frame.animate.shift(1.5*UP),run_time=2)
poly = RegularPolygon(n=12,color=GREEN,fill_color=WHITE,fill_opacity=0.4).scale(2).next_to(eqs,UP)
self.play(Write(poly),run_time=2)
self.wait(2)
ras = DoubleArrow(poly.get_center(),poly.get_vertices()[2],buff=0,color=BLUE)
ras_v = MathTex("r").set_color(RED).next_to(ras,RIGHT).shift(0.2*LEFT+0.1*DOWN)
self.play(Write(VGroup(ras,ras_v)),Write(Dot(radius=0.09,color=YELLOW,point=poly.get_center())),run_time=2)
self.wait(3)
area = Tex("Area(Dodecagon) = Area(Rectangle) = $3r \\times r = 3r^2$").scale(0.8).next_to(eqs)
area.set_color(YELLOW)
self.play(Write(area),run_time=2)
self.wait(10)
from Job import Job
import binascii #TODO: move outside ?
# TODO: move into other file if another implementation is done
# Subscription state
class Subscription(object):
'''Encapsulates the Subscription state from the JSON-RPC2 server'''
# Subclasses should override this
    @staticmethod
    def ProofOfWork(header):
raise Exception('Do not use the Subscription class directly, subclass it')
class StateException(Exception): pass
def __init__(self):
self._id = None
#self._difficulty = None
#self._target = None
self._worker_name = None
self._mining_thread = None
@property
def id(self): return self._id
@property
def worker_name(self): return self._worker_name
#@property
#def difficulty(self): return self._difficulty
#@property
#def target(self): return self._target
def set_worker_name(self, worker_name):
if self._worker_name:
            raise self.StateException('Already authenticated as %r (requesting %r)' % (self._worker_name, worker_name))
self._worker_name = worker_name
def set_subscription(self, subscription_id):
if self._id is not None:
raise self.StateException('Already subscribed')
self._id = subscription_id
def create_job(self, job_id, blob, target):
'''Creates a new Job object populated with all the goodness it needs to mine.'''
if self._id is None:
raise self.StateException('Not subscribed')
return Job(
subscription_id = self.id,
job_id = job_id,
blob = blob,
target = target,
proof_of_work = self.ProofOfWork
)
def __str__(self):
return '<Subscription id={}, worker_name={}>'.format(self.id, self.worker_name)
##############
import os
import ctypes
if os.name == 'nt':
lib = ctypes.cdll.LoadLibrary('cryptonight_lib/project/Release/cryptonight_lib.dll')
else:
lib = ctypes.cdll.LoadLibrary('cryptonight_lib/libcryptonight_lib.so')
c_pow = lib.cryptonight_hash
c_pow.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(ctypes.c_char), ctypes.c_int]
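# The C function's signature is cryptonight_hash(output, input, input_len): it
# writes the 32-byte CryptoNight hash of `input_len` bytes of `input` into `output`.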
def cryptonight_proof_of_work(data):
output = ctypes.create_string_buffer(32)
c_pow(output, data, 76)
outputhex = binascii.hexlify(output).decode() #TODO: move outside?
return outputhex
class SubscriptionCryptonight(Subscription):
'''Subscription for Cryptonight-based coins, like XMR (Monero).'''
ProofOfWork = lambda s, h: (cryptonight_proof_of_work(h))
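# A minimal usage sketch (all values below are placeholders, not real stratum data):
#   sub = SubscriptionCryptonight()
#   sub.set_subscription('subscription-id')
#   sub.set_worker_name('worker.1')
#   job = sub.create_job(job_id='1', blob=b'\x00' * 76, target='ffffff')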
import pytest
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklego.common import flatten
from sklego.meta import DecayEstimator
from tests.conftest import general_checks, classifier_checks, regressor_checks, nonmeta_checks
@pytest.mark.parametrize("test_fn", flatten([
general_checks,
nonmeta_checks,
regressor_checks
]))
def test_estimator_checks_regression(test_fn):
trf = DecayEstimator(LinearRegression())
test_fn(DecayEstimator.__name__, trf)
@pytest.mark.parametrize("test_fn", flatten([
general_checks,
nonmeta_checks,
classifier_checks
]))
def test_estimator_checks_classification(test_fn):
trf = DecayEstimator(LogisticRegression(solver='lbfgs'))
test_fn(DecayEstimator.__name__, trf)
@pytest.mark.parametrize("mod", flatten([LinearRegression(), Ridge(), DecisionTreeRegressor()]))
def test_decay_weight_regr(mod):
X, y = np.random.normal(0, 1, (100, 100)), np.random.normal(0, 1, (100, ))
mod = DecayEstimator(mod, decay=0.95).fit(X, y)
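    # the first (oldest) sample should receive the smallest weight: decay ** n_samples = 0.95 ** 100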
assert mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
@pytest.mark.parametrize("mod", flatten([DecisionTreeClassifier(), LogisticRegression(solver='lbfgs')]))
def test_decay_weight_clf(mod):
    X, y = np.random.normal(0, 1, (100, 100)), (np.random.normal(0, 1, (100, )) < 0).astype(int)
mod = DecayEstimator(mod, decay=0.95).fit(X, y)
assert mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
@pytest.mark.parametrize("mod", flatten([
KNeighborsClassifier(),
]))
def test_throw_warning(mod):
X, y = np.random.normal(0, 1, (100, 100)), np.random.normal(0, 1, (100, )) < 0
with pytest.raises(TypeError) as e:
DecayEstimator(mod, decay=0.95).fit(X, y)
assert "sample_weight" in str(e)
assert type(mod).__name__ in str(e)
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME> for Adafruit Industries.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
``adafruit_si7021``
===================
This is a CircuitPython driver for the SI7021 temperature and humidity sensor.
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* Adafruit `Si7021 Temperature & Humidity Sensor Breakout Board
<https://www.adafruit.com/product/3251>`_ (Product ID: 3251)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
try:
import struct
except ImportError:
import ustruct as struct
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_SI7021.git"
HUMIDITY = const(0xF5)
TEMPERATURE = const(0xF3)
_RESET = const(0xFE)
_READ_USER1 = const(0xE7)
_USER1_VAL = const(0x3A)
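# CRC-8 checksum with polynomial x^8 + x^5 + x^4 + 1 (0x131) and zero
# initialization, as specified in the Si7021 datasheet.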
def _crc(data):
crc = 0
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x80:
crc <<= 1
crc ^= 0x131
else:
crc <<= 1
return crc
class SI7021:
"""
A driver for the SI7021 temperature and humidity sensor.
:param i2c_bus: The `busio.I2C` object to use. This is the only required parameter.
:param int address: (optional) The I2C address of the device.
"""
def __init__(self, i2c_bus, address=0x40):
self.i2c_device = I2CDevice(i2c_bus, address)
self._command(_RESET)
# Make sure the USER1 settings are correct.
while True:
# While restarting, the sensor doesn't respond to reads or writes.
try:
data = bytearray([_READ_USER1])
with self.i2c_device as i2c:
i2c.write_then_readinto(data, data)
value = data[0]
except OSError:
pass
else:
break
if value != _USER1_VAL:
raise RuntimeError("bad USER1 register (%x!=%x)" % (value, _USER1_VAL))
self._measurement = 0
def _command(self, command):
with self.i2c_device as i2c:
i2c.write(struct.pack("B", command))
def _data(self):
data = bytearray(3)
data[0] = 0xFF
while True:
# While busy, the sensor doesn't respond to reads.
try:
with self.i2c_device as i2c:
i2c.readinto(data)
except OSError:
pass
else:
if data[0] != 0xFF: # Check if read succeeded.
break
value, checksum = struct.unpack(">HB", data)
if checksum != _crc(data[:2]):
raise ValueError("CRC mismatch")
return value
@property
def relative_humidity(self):
"""The measured relative humidity in percent."""
self.start_measurement(HUMIDITY)
value = self._data()
self._measurement = 0
return min(100.0, value * 125.0 / 65536.0 - 6.0)
@property
def temperature(self):
"""The measured temperature in degrees Celcius."""
self.start_measurement(TEMPERATURE)
value = self._data()
self._measurement = 0
return value * 175.72 / 65536.0 - 46.85
def start_measurement(self, what):
"""
Starts a measurement.
Starts a measurement of either ``HUMIDITY`` or ``TEMPERATURE``
depending on the ``what`` argument. Returns immediately, and the
result of the measurement can be retrieved with the
``temperature`` and ``relative_humidity`` properties. This way it
will take much less time.
This can be useful if you want to start the measurement, but don't
want the call to block until the measurement is ready -- for instance,
when you are doing other things at the same time.
"""
if what not in (HUMIDITY, TEMPERATURE):
raise ValueError()
if not self._measurement:
self._command(what)
elif self._measurement != what:
raise RuntimeError("other measurement in progress")
self._measurement = what
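# A minimal usage sketch (assumes a CircuitPython board that exposes I2C pins):
#   import board
#   import busio
#   sensor = SI7021(busio.I2C(board.SCL, board.SDA))
#   print(sensor.temperature, sensor.relative_humidity)
#   # or kick off a measurement early and read the property later:
#   sensor.start_measurement(TEMPERATURE)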
# gen_sample.py
import numpy as np
def sinewave(f):
srate = 16000
return lambda i: np.sin(2 * np.pi * f * i / srate)
def zero_spectrum(fs):
sines = map(sinewave, fs)
return lambda i: sum(map(lambda f: f(i), sines)) / len(fs)
def spectrum(fs):
amplitude = 32767
zsp = zero_spectrum(fs)
return lambda i: (zsp(i) + 1) * amplitude
def gen_sample():
fs = [1000, 2400]
sp = spectrum(fs)
waveform = np.fromfunction(sp, (16000,))
intform = np.array(np.floor(waveform), dtype=np.uint16)
return np.reshape(intform, (50, 320))
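# Quick sanity check of the generator (a sketch, not part of the original file):
if __name__ == '__main__':
    sample = gen_sample()
    print(sample.shape, sample.dtype)  # (50, 320) uint16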
#!/usr/bin/env python
def main():
print "=" * 120
print " If you can see that all your cores are running at 100% then you are using optimised ATLAS library."
print "=" * 120
print
import numpy
# numpy.test() #this should run with no errors (skipped tests and known-fails are ok)
size = 8000
if id(numpy.dot) == id(numpy.core.multiarray.dot):
        # A way to know if you use fast blas/lapack or not. However, it won't tell
        # you whether it is generic ATLAS or a machine-optimised version.
print "Not using blas/lapack!"
print "creating matrix"
a = numpy.random.randn(size, size)
print "multiplying matrix"
numpy.dot(a.T, a)
print "adding identity matrix"
i = numpy.identity(size)
a += i
print "inverting matrix"
inva = numpy.linalg.inv(a)
if __name__ == '__main__':
main()
# -*- coding:utf-8 -*-
import requests, re, os, configparser, time, hashlib, json, shutil, traceback
from PIL import Image
# Call the Baidu Translate API
def tran(api_id, key, word, to_lang):
# init salt and final_sign
salt = str(time.time())[:10]
final_sign = api_id + word + salt + key
final_sign = hashlib.md5(final_sign.encode("utf-8")).hexdigest()
    # form params
paramas = {
'q': word,
'from': 'jp',
'to': to_lang,
'appid': '%s' % api_id,
'salt': '%s' % salt,
'sign': '%s' % final_sign
}
response = requests.get('http://api.fanyi.baidu.com/api/trans/vip/translate', params=paramas, timeout=10).content
content = str(response, encoding="utf-8")
try:
json_reads = json.loads(content)
return json_reads['trans_result'][0]['dst']
except json.decoder.JSONDecodeError:
print(' >翻译简介失败,请截图给作者,检查是否有非法字符:', word)
return '无法翻译该简介,请手动去arzon.jp查找简介并翻译。'
except:
print(' >正在尝试重新日译中...')
return tran(api_id, key, word, to_lang)
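# Usage sketch (the app id and key below are placeholders for real Baidu Translate
# API credentials, not working values):
#   chinese = tran('20200101000123456', 'my-secret-key', '日本語のあらすじ', 'zh')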
# Get an arzon cookie (passing the adult age check); returns the cookie dict
def get_acook(prox):
if prox:
session = requests.Session()
session.get('https://www.arzon.jp/index.php?action=adult_customer_agecheck&agecheck=1&redirect=https%3A%2F%2Fwww.arzon.jp%2F', proxies=prox, timeout=10)
return session.cookies.get_dict()
else:
session = requests.Session()
session.get('https://www.arzon.jp/index.php?action=adult_customer_agecheck&agecheck=1&redirect=https%3A%2F%2Fwww.arzon.jp%2F', timeout=10)
return session.cookies.get_dict()
# Fetch a page's source and return its text; the functions below fake Python "overloading" via the argument list length
def get_jav_html(url_list):
if len(url_list) == 1:
rqs = requests.get(url_list[0], timeout=10)
else:
rqs = requests.get(url_list[0], proxies=url_list[1], timeout=10)
rqs.encoding = 'utf-8'
return rqs.text
def get_arzon_html(url_list):
if len(url_list) == 2:
rqs = requests.get(url_list[0], cookies=url_list[1], timeout=10)
else:
rqs = requests.get(url_list[0], cookies=url_list[1], proxies=url_list[2], timeout=10)
rqs.encoding = 'utf-8'
return rqs.text
# Download an image; returns nothing
def download_pic(cov_list):
    # [0] retry count, [1] image URL, [2] image path, [3] proxies
if cov_list[0] < 5:
try:
if len(cov_list) == 3:
r = requests.get(cov_list[1], stream=True, timeout=(3, 7))
with open(cov_list[2], 'wb') as pic:
for chunk in r:
pic.write(chunk)
else:
r = requests.get(cov_list[1], proxies=cov_list[3], stream=True, timeout=(3, 7))
with open(cov_list[2], 'wb') as pic:
for chunk in r:
pic.write(chunk)
except:
print(' >下载失败,重新下载...')
cov_list[0] += 1
download_pic(cov_list)
try:
Image.open(cov_list[2])
except OSError:
print(' >下载失败,重新下载....')
cov_list[0] += 1
download_pic(cov_list)
else:
raise Exception(' >下载多次,仍然失败!')
# main starts here
print('1、避开21:00-1:00,访问javlibrary和arzon很慢。\n'
'2、简体繁体取决于复制粘贴的网址是cn还是tw!\n'
'3、不要用www.javlibrary.com/xx/xxxx!用防屏蔽地址\n')
# Read the config file; this ini lets the user set the renaming format and the jav site URL
config_settings = configparser.RawConfigParser()
print('正在读取ini中的设置...', end='')
try:
config_settings.read('ini的设置会影响所有exe的操作结果.ini', encoding='utf-8-sig')
if_nfo = config_settings.get("收集nfo", "是否收集nfo?")
if_review = config_settings.get("收集nfo", "是否收集javlibrary上的影评?")
custom_title = config_settings.get("收集nfo", "nfo中title的格式")
if_mp4 = config_settings.get("重命名影片", "是否重命名影片?")
rename_mp4 = config_settings.get("重命名影片", "重命名影片的格式")
if_jpg = config_settings.get("下载封面", "是否下载封面海报?")
custom_fanart = config_settings.get("下载封面", "DVD封面的格式")
custom_poster = config_settings.get("下载封面", "海报的格式")
if_sculpture = config_settings.get("kodi专用", "是否收集女优头像")
if_proxy = config_settings.get("代理", "是否使用代理?")
proxy = config_settings.get("代理", "代理IP及端口")
if_plot = config_settings.get("百度翻译API", "是否需要日语简介?")
if_tran = config_settings.get("百度翻译API", "是否翻译为中文?")
ID = config_settings.get("百度翻译API", "APP ID")
SK = config_settings.get("百度翻译API", "密钥")
simp_trad = config_settings.get("其他设置", "简繁中文?")
bus_url = config_settings.get("其他设置", "javbus网址")
title_len = int(config_settings.get("其他设置", "重命名中的标题长度(50~150)"))
movie_type = config_settings.get("原影片文件的性质", "有码")
except:
print(traceback.format_exc())
print('\n无法读取ini文件,请修改它为正确格式,或者打开“【ini】重新创建ini.exe”创建全新的ini!')
    os.system('pause')
    raise SystemExit
# Check: the actress-portrait ini and the portrait folder
if if_sculpture == '是':
if not os.path.exists('女优头像'):
print('\n“女优头像”文件夹丢失!请把它放进exe的文件夹中!\n')
os.system('pause')
if not os.path.exists('【缺失的女优头像统计For Kodi】.ini'):
config_actor = configparser.ConfigParser()
config_actor.add_section("缺失的女优头像")
config_actor.set("缺失的女优头像", "女优姓名", "N(次数)")
config_actor.add_section("说明")
config_actor.set("说明", "上面的“女优姓名 = N(次数)”的表达式", "后面的N数字表示你有N部(次)影片都在找她的头像,可惜找不到")
config_actor.set("说明", "你可以去保存一下她的头像jpg到“女优头像”文件夹", "以后就能保存她的头像到影片的文件夹了")
config_actor.write(open('【缺失的女优头像统计For Kodi】.ini', "w", encoding='utf-8-sig'))
print('\n >“【缺失的女优头像统计For Kodi】.ini”文件被你玩坏了...正在重写ini...成功!')
print('正在重新读取...', end='')
print('\n读取ini文件成功! ')
# Check: get an arzon cookie and pass the adult verification
proxies = {"http": "http://" + proxy, "https": "https://" + proxy}
acook = {}
if if_plot == '是' and if_nfo == '是':
print('正在尝试通过“https://www.arzon.jp”的成人验证...')
try:
if if_proxy == '是' and proxy != '':
acook = get_acook(proxies)
else:
acook = get_acook({})
print('通过arzon的成人验证!\n')
except:
print('连接arzon失败,请避开网络高峰期!请重启程序!\n')
os.system('pause')
# Decide which sites need the proxy
if if_proxy == '是' and proxy != '':  # whether a proxy is needed; sets the state used for requests
jav_list = ['', proxies]
    arzon_list = ['', acook, proxies]  # proxy for arzon
    cover_list = [0, '', '', proxies]  # proxy for dmm
else:
jav_list = ['']
arzon_list = ['', acook]
    cover_list = [0, '', '']
# http://www.x39n.com/ https://www.buscdn.work/
if not bus_url.endswith('/'):
bus_url += '/'
# Check: Baidu translation, simplified vs. traditional Chinese
if simp_trad == '简':
t_lang = 'zh'
else:
t_lang = 'cht'
# Initialize the rest
nfo_dict = {'空格': ' ', '车牌': 'ABC-123', '标题': '未知标题', '完整标题': '完整标题', '导演': '未知导演',
'发行年月日': '1970-01-01', '发行年份': '1970', '月': '01', '日': '01', '是否中字': '', '是否xx': '',
'片商': '未知片商', '评分': '0', '首个女优': '未知演员', '全部女优': '未知演员', '车牌前缀': 'ABC',
            '片长': '0', '\\': '\\', '视频': 'ABC-123', '影片类型': movie_type, '系列': '未知系列'}  # temporarily holds movie info: actresses, title, etc.
rename_mp4_list = rename_mp4.split('+')
title_list = custom_title.replace('标题', '完整标题', 1).split('+')
fanart_list = custom_fanart.split('+')
poster_list = custom_poster.split('+')
for j in rename_mp4_list:
if j not in nfo_dict:
nfo_dict[j] = j
for j in title_list:
if j not in nfo_dict:
nfo_dict[j] = j
for j in fanart_list:
if j not in nfo_dict:
nfo_dict[j] = j
for j in poster_list:
if j not in nfo_dict:
nfo_dict[j] = j
root = os.path.join(os.path.expanduser("~"), 'Desktop')
# Fetch the javlibrary search page that provides the nfo info
while 1:
try:
input_url = input('\n请输入javlibrary上的某一部影片的网址:')
print()
jav_list[0] = input_url
try:
javlib_html = get_jav_html(jav_list)
except:
print('>>尝试打开页面失败,正在尝试第二次打开...')
            try:  # javlib often fails to open at peak hours; try a second time
javlib_html = get_jav_html(jav_list)
print(' >第二次尝试成功!')
except:
print('>>网址正确吗?打不开啊!')
continue
        # The search-result page is usually the movie's own page, but it may also list multiple results
        # Try to find the title. Case 1: it is found, so this page is the movie's own page [a-zA-Z]{1,6}-\d{1,5}.+?
        titleg = re.search(r'<title>(.+?) - JAVLibrary</title>', javlib_html)  # match the title
        # the search result is the movie's own page
if str(titleg) != 'None':
title = titleg.group(1)
        # Case 2: there may be two or more search results, so this match finds no title: None!
        else:  # keep looking for the title with a different pattern: the first title on a multi-result page
print('>>网址正确吗?找不到影片信息啊!')
continue
print('>>正在处理:', title)
        # Strip special characters from the title
title = title.replace('\n', '').replace('&', '和').replace('\\', '#')\
.replace('/', '#').replace(':', ':').replace('*', '#').replace('?', '?')\
.replace('"', '#').replace('<', '【').replace('>', '】')\
.replace('|', '#').replace('<', '【').replace('>', '】')\
.replace('〈', '【').replace('〉', '】').replace('&', '和').replace('\t', '').replace('\r', '')
        # Regex matching of the movie info starts here!
        # The title starts with the movie code; we want the bare title after it
        car_titleg = re.search(r'(.+?) (.+)', title)  # matches the code here; the [a-z] part may look odd
        # the movie code
nfo_dict['车牌'] = car_titleg.group(1)
        # the user-facing title is the short one
nfo_dict['完整标题'] = car_titleg.group(2)
        # handle overly long titles
if len(nfo_dict['完整标题']) > title_len:
nfo_dict['标题'] = nfo_dict['完整标题'][:title_len]
else:
nfo_dict['标题'] = nfo_dict['完整标题']
        # handle special codes like t28-573
if nfo_dict['车牌'].startswith('T-28'):
nfo_dict['车牌'] = nfo_dict['车牌'].replace('T-28', 'T28-', 1)
nfo_dict['车牌前缀'] = nfo_dict['车牌'].split('-')[0]
        # studio
studiog = re.search(r'rel="tag">(.+?)</a> <span id="maker_', javlib_html)
if str(studiog) != 'None':
nfo_dict['片商'] = studiog.group(1)
else:
nfo_dict['片商'] = '未知片商'
        # release date
premieredg = re.search(r'<td class="text">(\d\d\d\d-\d\d-\d\d)</td>', javlib_html)
if str(premieredg) != 'None':
nfo_dict['发行年月日'] = premieredg.group(1)
nfo_dict['发行年份'] = nfo_dict['发行年月日'][0:4]
nfo_dict['月'] = nfo_dict['发行年月日'][5:7]
nfo_dict['日'] = nfo_dict['发行年月日'][8:10]
else:
nfo_dict['发行年月日'] = '1970-01-01'
nfo_dict['发行年份'] = '1970'
nfo_dict['月'] = '01'
nfo_dict['日'] = '01'
        # runtime: <td><span class="text">150</span> 分钟</td>
runtimeg = re.search(r'<td><span class="text">(\d+?)</span>', javlib_html)
if str(runtimeg) != 'None':
nfo_dict['片长'] = runtimeg.group(1)
else:
nfo_dict['片长'] = '0'
        # director
directorg = re.search(r'rel="tag">(.+?)</a> <span id="director', javlib_html)
if str(directorg) != 'None':
nfo_dict['导演'] = directorg.group(1)
else:
nfo_dict['导演'] = '未知导演'
        # all the actresses, and the first actress
actors_prag = re.search(r'<span id="cast(.+?)</td>', javlib_html, re.DOTALL)
if str(actors_prag) != 'None':
actors = re.findall(r'rel="tag">(.+?)</a></span> <span id', actors_prag.group(1))
if len(actors) != 0:
if len(actors) > 7:
actors = actors[:7]
nfo_dict['首个女优'] = actors[0]
nfo_dict['全部女优'] = ' '.join(actors)
else:
nfo_dict['首个女优'] = nfo_dict['全部女优'] = '未知演员'
actors = ['未知演员']
else:
nfo_dict['首个女优'] = nfo_dict['全部女优'] = '未知演员'
actors = ['未知演员']
        # drop the actress names off the end of the title (rstrip() strips a character set, not a suffix)
        if nfo_dict['标题'].endswith(nfo_dict['全部女优']):
            nfo_dict['标题'] = nfo_dict['标题'][:-len(nfo_dict['全部女优'])].rstrip()
        # genres
genres = re.findall(r'category tag">(.+?)</a>', javlib_html)
        # DVD cover
        coverg = re.search(r'src="(.+?)" width="600" height="403"', javlib_html)  # regex match for the cover image
if str(coverg) != 'None':
cover_url = coverg.group(1)
else:
cover_url = ''
        # rating
scoreg = re.search(r' <span class="score">\((.+?)\)</span>', javlib_html)
if str(scoreg) != 'None':
score = float(scoreg.group(1))
            score = (score - 4) * 5 / 3  # on javlib, any movie with some attention scores above 6 (out of 10); stretch the spread
if score >= 0:
score = '%.1f' % score
nfo_dict['评分'] = str(score)
else:
nfo_dict['评分'] = '0'
else:
nfo_dict['评分'] = '0'
criticrating = str(float(nfo_dict['评分'])*10)
        # javlib's top reviews (.+?\s*.*?\s*.*?\s*.*?); the matching below may look odd, but that is just how the page is
plot_review = ''
if if_review == '是':
review = re.findall(r'(hidden">.+?</textarea>)</td>\s*?<td class="scores"><table>\s*?<tr><td><span class="scoreup">\d\d+?</span>', javlib_html, re.DOTALL)
if len(review) != 0:
plot_review = '\n【精彩影评】:'
for rev in review:
right_review = re.findall(r'hidden">(.+?)</textarea>', rev, re.DOTALL)
if len(right_review) != 0:
plot_review = plot_review + right_review[-1].replace('&', '和') + '////'
continue
plot_review = plot_review.replace('\n', '').replace('&', '和').replace('\\', '#') \
.replace(':', ':').replace('*', '#').replace('?', '?') \
.replace('"', '#').replace('<', '【').replace('>', '】') \
.replace('|', '#').replace('<', '【').replace('>', '】') \
.replace('〈', '【').replace('〉', '】').replace('&', '和').replace('\t', '').replace('\r', '')
#print(plot_review)
        # series: javlib has no "set" field for it
#######################################################################
        # the plot summary from arzon
plot = series = ''
if if_nfo == '是' and if_plot == '是':
while 1:
arz_search_url = 'https://www.arzon.jp/itemlist.html?t=&m=all&s=&q=' + nfo_dict['车牌']
print(' >正在查找简介:', arz_search_url)
arzon_list[0] = arz_search_url
try:
search_html = get_arzon_html(arzon_list)
except:
print(' >尝试打开“', arz_search_url, '”搜索页面失败,正在尝试第二次打开...')
try:
search_html = get_arzon_html(arzon_list)
print(' >第二次尝试成功!')
except:
print(' >连接arzon失败:' + arz_search_url)
plot = '【连接arzon失败!看到此提示请重新整理nfo!】'
break
if plot == '':
# <dt><a href="https://www.arzon.jp/item_1376110.html" title="限界集落に越してきた人妻 ~村民"><img src=
                AVs = re.findall(r'<h2><a href="(/item.+?)" title=', search_html)  # all search-result links
                # the search-result page lists N movies
                if AVs:  # arzon has search results
results_num = len(AVs)
for i in range(results_num):
                            arz_url = 'https://www.arzon.jp' + AVs[i]  # the (i+1)-th link
arzon_list[0] = arz_url
try:
jav_html = get_arzon_html(arzon_list)
except:
print(' >打开“', arz_url, '”第' + str(i + 1) + '个搜索结果失败,正在尝试第二次打开...')
try:
jav_html = get_arzon_html(arzon_list)
print(' >第二次尝试成功!')
except:
print(' >无法进入第' + str(i + 1) + '个搜索结果:' + arz_url)
plot = '【连接arzon失败!看到此提示请重新整理nfo!】'
break
if plot == '':
                                # look for the plot on this arz_url page
plotg = re.search(r'<h2>作品紹介</h2>([\s\S]*?)</div>', jav_html)
                                # the plot was found
if str(plotg) != 'None':
plot_br = plotg.group(1)
plot = ''
for line in plot_br.split('<br />'):
line = line.strip()
plot += line
plot = plot.replace('\n', '').replace('&', '和').replace('\\', '#') \
.replace('/', '#').replace(':', ':').replace('*', '#').replace('?', '?') \
.replace('"', '#').replace('<', '【').replace('>', '】') \
.replace('|', '#').replace('<', '【').replace('>', '】') \
.replace('〈', '【').replace('〉', '】').replace('&', '和').replace('\t', '').replace('\r', '')
                                    # series, e.g. <a href="/itemlist.html?mkr=10149&series=43">麗しのノーブラ先生</a>
seriesg = re.search(r'series=\d+">(.+?)</a>', jav_html)
if str(seriesg) != 'None':
series = nfo_dict['系列'] = seriesg.group(1)
else:
nfo_dict['系列'] = '未知系列'
break
                        # all search results checked and still no plot found
if plot == '':
print(' >arzon有' + str(results_num) + '个搜索结果:' + arz_search_url + ',但找不到简介!')
plot = '【arzon有该影片,但找不到简介】'
break
                    # the arzon search page is actually the 18+ age check
else:
adultg = re.search(r'18歳未満', search_html)
if str(adultg) != 'None':
print(' >成人验证,请重启程序!')
os.system('pause')
                        else:  # not the age check, and no plot either
print(' >arzon找不到该影片简介,可能被下架!')
plot = '【影片下架,再无简介】'
break
if if_tran == '是':
plot = tran(ID, SK, plot, t_lang)
#######################################################################
        # 1. rename the video file
        new_mp4 = nfo_dict['车牌']  # defaults to the movie code
if if_mp4 == '是':
            # the new filename new_mp4
new_mp4 = ''
for j in rename_mp4_list:
new_mp4 += nfo_dict[j]
new_mp4 = new_mp4.rstrip(' ')
        # nfo_dict['视频'] is used to name the images
nfo_dict['视频'] = new_mp4
        new_root = root  # to stay consistent with the other scripts
        # 2. rename the folder
        # 3. write the nfo
if if_nfo == '是':
cus_title = ''
for i in title_list:
cus_title += nfo_dict[i]
            # start writing the nfo
            info_path = root + '\\' + new_mp4 + '.nfo'  # path where the nfo is stored
            # write the nfo; the format follows Emby's nfo layout
f = open(info_path, 'w', encoding="utf-8")
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\" ?>\n"
"<movie>\n"
" <plot>" + plot + plot_review + "</plot>\n"
" <title>" + cus_title + "</title>\n"
" <director>" + nfo_dict['导演'] + "</director>\n"
" <rating>" + nfo_dict['评分'] + "</rating>\n"
" <criticrating>" + criticrating + "</criticrating>\n"
" <year>" + nfo_dict['发行年份'] + "</year>\n"
" <mpaa>NC-17</mpaa>\n"
" <customrating>NC-17</customrating>\n"
" <countrycode>JP</countrycode>\n"
" <premiered>" + nfo_dict['发行年月日'] + "</premiered>\n"
" <release>" + nfo_dict['发行年月日'] + "</release>\n"
" <runtime>" + nfo_dict['片长'] + "</runtime>\n"
" <country>日本</country>\n"
" <studio>" + nfo_dict['片商'] + "</studio>\n"
" <id>" + nfo_dict['车牌'] + "</id>\n"
" <num>" + nfo_dict['车牌'] + "</num>\n"
" <set>" + series + "</set>\n")
for i in genres:
f.write(" <genre>" + i + "</genre>\n")
if series:
f.write(" <genre>系列:" + series + "</genre>\n")
f.write(" <genre>片商:" + nfo_dict['片商'] + "</genre>\n")
for i in genres:
f.write(" <tag>" + i + "</tag>\n")
if series:
f.write(" <tag>系列:" + series + "</tag>\n")
f.write(" <tag>片商:" + nfo_dict['片商'] + "</tag>\n")
for i in actors:
f.write(" <actor>\n <name>" + i + "</name>\n <type>Actor</type>\n <thumb></thumb>\n </actor>\n")
f.write("</movie>\n")
f.close()
print(' >nfo收集完成')
        # 4. two images are needed
if if_jpg == '是':
            # the cover download URL
cover_url = 'http:' + cover_url
            # fanart and poster paths
fanart_path = new_root + '\\'
poster_path = new_root + '\\'
for i in fanart_list:
fanart_path += nfo_dict[i]
for i in poster_list:
poster_path += nfo_dict[i]
            # download the cover
print(' >正在下载封面:', cover_url)
cover_list[0] = 0
cover_list[1] = cover_url
cover_list[2] = fanart_path
try:
download_pic(cover_list)
print(' >fanart.jpg下载成功')
except:
print(' >从javlibrary下载fanart.jpg失败,正在前往javbus...')
                # look for the image URL on javbus
bus_search_url = bus_url + nfo_dict['车牌']
jav_list[0] = bus_search_url
try:
bav_html = get_jav_html(jav_list)
except:
print(' >连接javbus失败,下载fanart失败:' + bus_search_url)
continue
                # DVD cover
                coverg = re.search(r'<a class="bigImage" href="(.+?)">', bav_html)  # regex match for the cover image
if str(coverg) != 'None':
cover_list[0] = 0
cover_list[1] = cover_url
cover_list[2] = fanart_path
print(' >正在从javbus下载封面:', cover_url)
try:
download_pic(cover_list)
print(' >fanart.jpg下载成功')
except:
print(' >下载fanart.jpg失败:' + cover_url)
continue
else:
print(' >从javbus上查找封面失败:' + bus_search_url)
continue
# crop
img = Image.open(fanart_path)
            w, h = img.size  # fanart width and height
            ex = int(w * 0.52625)  # 0.52625 is derived from Emby's poster aspect ratio
            poster = img.crop((ex, 0, w, h))  # (ex, 0) is the lower-left (x, y); (w, h) is the upper-right
            poster.save(poster_path, quality=95)  # quality=95 keeps the crop near-lossless; the default is 75
print(' >poster.jpg裁剪成功')
        # 5. collect actress portraits
if if_sculpture == '是':
if actors[0] == '未知演员':
print(' >未知演员')
else:
for each_actor in actors:
exist_actor_path = '女优头像\\' + each_actor + '.jpg'
# print(exist_actor_path)
jpg_type = '.jpg'
                    if not os.path.exists(exist_actor_path):  # the actress's jpg portrait is not there yet
exist_actor_path = '女优头像\\' + each_actor + '.png'
                        if not os.path.exists(exist_actor_path):  # no png portrait either
print(' >没有女优头像:' + each_actor + '\n')
config_actor = configparser.ConfigParser()
config_actor.read('【缺失的女优头像统计For Kodi】.ini', encoding='utf-8-sig')
try:
each_actor_times = config_actor.get('缺失的女优头像', each_actor)
config_actor.set("缺失的女优头像", each_actor, str(int(each_actor_times) + 1))
except:
config_actor.set("缺失的女优头像", each_actor, '1')
config_actor.write(open('【缺失的女优头像统计For Kodi】.ini', "w", encoding='utf-8-sig'))
continue
else:
jpg_type = '.png'
actors_path = new_root + '\\.actors\\'
if not os.path.exists(actors_path):
os.makedirs(actors_path)
shutil.copyfile('女优头像\\' + each_actor + jpg_type,
actors_path + each_actor + jpg_type)
print(' >女优头像收集完成:', each_actor)
print()
except:
        print('发生错误,如一直在该影片报错请截图并联系作者:' + input_url + '\n' + traceback.format_exc() + '\n')
continue
from . import program
from . import turtle_test
from . import antoine_test
from . import dance
# app/engine/overworld/overworld_states.py
import logging
import app.engine.config as cf
from app.data.database import DB
from app.engine import engine, menus
from app.engine.fluid_scroll import FluidScroll
from app.engine.game_state import game
from app.engine.input_manager import INPUT
from app.engine.objects.overworld import (OverworldNodeObject,
OverworldNodeProperty)
from app.engine.overworld.overworld_actions import OverworldMove
from app.engine.overworld.overworld_manager import OverworldManager
from app.engine.overworld.overworld_map_view import OverworldMapView
from app.engine.overworld.overworld_movement_manager import \
OverworldMovementManager
from app.engine.sound import SOUNDTHREAD
from app.engine.state import MapState, State
from app.utilities.typing import NID
class OverworldState(MapState):
"""The main overworld state - sprite is on the map and you can navigate around.
"""
name = 'overworld'
@classmethod
def set_up_overworld_game_state(cls, overworld_to_load: NID = None):
game.generic()
from .overworld_cursor import OverworldCursor
from .overworld_map_view import OverworldMapView
from .overworld_movement_manager import OverworldMovementManager
if not overworld_to_load:
overworld_to_load: NID = game.game_vars['_next_overworld_nid']
game.cursor = OverworldCursor(game.camera)
game.overworld_controller = OverworldManager(game.overworld_registry[overworld_to_load], game.cursor)
game.movement = OverworldMovementManager(game.overworld_controller)
game.map_view = OverworldMapView(game.overworld_controller, game.cursor)
# assign the next level
if game.game_vars.get('_next_level_nid'):
next_level_node_nid = game.overworld_controller.node_by_level(game.game_vars.get('_next_level_nid')).nid
game.overworld_controller.set_node_property(next_level_node_nid, OverworldNodeProperty.IS_NEXT_LEVEL, True)
def start(self):
OverworldState.set_up_overworld_game_state()
self.begin_time = engine.get_time()
game.cursor.set_pos(game.overworld_controller.selected_party_node().position)
game.camera.force_center(*game.overworld_controller.selected_party_node().position)
game.events.trigger('overworld_start', level_nid = game.game_vars['_next_level_nid'])
def begin(self):
game.cursor.show()
def take_input(self, event):
game.cursor.set_speed_state(INPUT.is_pressed('BACK'))
game.cursor.take_input()
if event == 'BACK': # flick our cursor back to our party
game.cursor.set_pos(game.overworld_controller.selected_party_node().position)
elif event == 'SELECT': # this is where the fun begins
cur_pos = game.cursor.position
selected_node: OverworldNodeObject = game.overworld_controller.node_at(cur_pos)
if selected_node: # if we clicked on a node
entity = game.overworld_controller.entity_at(selected_node.position)
if entity and entity.team == 'player': # there's a party underneath us, select it and launch the party menu
game.overworld_controller.select_entity(entity)
SOUNDTHREAD.play_sfx('Select 5')
game.state.change('overworld_party_option_menu')
return
else: # we selected a node without a party
party_node = game.overworld_controller.selected_party_node()
if game.overworld_controller.any_path(party_node, selected_node): # if there is a path from our party to this node
if selected_node.prefab.level == game.game_vars['_next_level_nid']: # if this is the next level, stop short and trigger event start
movement = OverworldMove(game.overworld_controller.selected_entity, selected_node, game.overworld_controller, event=True, remove_last=True)
game.state.change('overworld_next_level')
else:
movement = OverworldMove(game.overworld_controller.selected_entity, selected_node.nid, game.overworld_controller)
# queue camera movement to unit
game.camera.do_slow_pan(1000)
game.camera.set_center(party_node.position[0], party_node.position[1])
game.state.change('overworld_movement')
movement.queue(game.movement)
else: # clicked on empty space, trigger the general menu
SOUNDTHREAD.play_sfx('Select 5')
game.state.change('overworld_game_option_menu')
def update(self):
super().update()
def draw(self, surf):
if isinstance(game.map_view, OverworldMapView):
return super().draw(surf)
else:
return surf
class OverworldMovementState(State):
"""State in which overworld movement is handled and executed.
"""
name = 'overworld_movement'
transparent = True
MOVEMENT_DELAY = 250
def __init__(self, name):
super().__init__(name=name)
self.delay = 0
def begin(self):
self.delay = self.MOVEMENT_DELAY
game.cursor.hide()
self.cancel_movement = False
def take_input(self, event):
if event == 'BACK':
self.cancel_movement = True
def update(self):
# make sure the camera is centered first
if not game.camera.at_rest():
return
# slight delay after camera centers
# this just looks better
if self.delay >= 0:
self.delay -= engine.get_delta()
return
# move the game pieces around
movement: OverworldMovementManager = game.movement
movement.update()
        if self.cancel_movement:
try:
movement.interrupt_movement(game.overworld_controller.selected_entity.nid)
self.cancel_movement = False
except: # this might be scripted, no interruption
logging.warning("Exception raised in OverworldMovement cancel_movement handling.")
# update the camera onto our following unit
focal_unit_nid: NID = movement.get_following_unit()
if focal_unit_nid:
focal_unit = game.overworld_controller.entities[focal_unit_nid]
unit_position = focal_unit.display_position
game.cursor.set_pos((round(unit_position[0]), round(unit_position[1])))
game.camera.set_center(*unit_position)
if len(game.movement) <= 0:
game.state.back()
return 'repeat'
class OverworldLevelTransition(State):
"""State handling transition events between overworld and
new level. This state should only last for one or two updates, maximum.
"""
name = 'overworld_next_level'
transparent = True
def start(self):
logging.debug("Begin Overworld-Level Transition State")
if not game.events.trigger('level_select', level_nid=game.game_vars['_next_level_nid']):
# no events, then just queue the move
movement = OverworldMove(game.overworld_controller.selected_entity.nid,
game.overworld_controller.node_by_level(game.game_vars['_next_level_nid']).nid,
game.overworld_controller)
game.state.change('overworld_movement')
movement.queue(game.movement)
return 'repeat'
def update(self):
self.go_to_next_level(game.game_vars['_next_level_nid'])
return 'repeat'
def go_to_next_level(self, nid=None):
game.sweep()
if not nid:
next_level_nid = game.game_vars['_next_level_nid']
else:
next_level_nid = nid
game.start_level(next_level_nid)
game.memory['next_state'] = 'turn_change'
game.state.change('transition_to')
def end(self):
return 'repeat'
class OverworldGameOptionMenuState(State):
"""When you click on an empty space, the Unit-Status-Guide-Options-Save menu
that appears; this is that state.
"""
name = 'overworld_game_option_menu'
transparent = True
def __init__(self, name=None):
super().__init__(name=name)
self.fluid = FluidScroll()
def start(self):
game.cursor.hide()
options = ['Unit', 'Status', 'Options', 'Save']
info_desc = ['Unit_desc', 'Status_desc', 'Options_desc', 'Save_desc']
ignore = [True, False, False, False]
unlocked_lore = [lore for lore in DB.lore if lore.nid in game.unlocked_lore and lore.category == 'Guide']
if unlocked_lore:
options.insert(2, 'Guide')
info_desc.insert(2, 'Guide_desc')
ignore.insert(2, False)
if cf.SETTINGS['debug']:
options.insert(0, 'Debug')
info_desc.insert(0, 'Debug_desc')
ignore.insert(0, False)
self.menu = menus.Choice(None, options, info=info_desc)
self.menu.set_ignore(ignore)
def make_save(self):
logging.info('%s: Creating Overworld Save...', 'make_save')
game.memory['next_state'] = 'title_save'
game.memory['save_kind'] = 'overworld'
game.state.change('transition_to')
def take_input(self, event):
first_push = self.fluid.update()
directions = self.fluid.get_directions()
self.menu.handle_mouse()
if 'DOWN' in directions:
SOUNDTHREAD.play_sfx('Select 6')
self.menu.move_down(first_push)
elif 'UP' in directions:
SOUNDTHREAD.play_sfx('Select 6')
self.menu.move_up(first_push)
if event == 'BACK':
SOUNDTHREAD.play_sfx('Select 4')
game.state.back()
elif event == 'SELECT':
SOUNDTHREAD.play_sfx('Select 1')
selection = self.menu.get_current()
if selection == 'Save':
self.make_save()
elif selection == 'Guide':
game.memory['next_state'] = 'base_guide'
game.state.change('transition_to')
elif selection == 'Options':
game.memory['next_state'] = 'settings_menu'
game.state.change('transition_to')
# @TODO Implement these
# elif selection == 'Status':
# pass
# # game.memory['next_state'] = 'status_screen'
# # game.state.change('transition_to')
# elif selection == 'Unit':
# pass
# # game.state.change('unit_menu')
# # game.state.change('transition_out')
elif event == 'INFO':
self.menu.toggle_info()
def update(self):
self.menu.update()
def draw(self, surf):
self.menu.draw(surf)
return surf
class OverworldPartyOptionMenu(State):
"""The party management menu that opens up when clicking on the selected party
"""
name = 'overworld_party_option_menu'
transparent = True
def __init__(self, name=None):
super().__init__(name=name)
self.fluid = FluidScroll()
def start(self):
game.cursor.hide()
options = ['Base Camp']
info_desc = ['Convoy_desc']
ignore = [False]
self.menu = menus.Choice(None, options, info=info_desc)
self.menu.set_ignore(ignore)
def take_input(self, event):
first_push = self.fluid.update()
directions = self.fluid.get_directions()
self.menu.handle_mouse()
if 'DOWN' in directions:
SOUNDTHREAD.play_sfx('Select 6')
self.menu.move_down(first_push)
elif 'UP' in directions:
SOUNDTHREAD.play_sfx('Select 6')
self.menu.move_up(first_push)
if event == 'BACK':
SOUNDTHREAD.play_sfx('Select 4')
game.state.back()
elif event == 'SELECT':
SOUNDTHREAD.play_sfx('Select 1')
selection = self.menu.get_current()
if selection == 'Base Camp':
game.memory['next_state'] = 'base_main'
game.state.change('transition_to')
elif event == 'INFO':
self.menu.toggle_info()
def update(self):
self.menu.update()
def draw(self, surf):
self.menu.draw(surf)
return surf
1719432 | def weiner(text, n):
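    # Weiner-style suffix-tree construction: suffixes are inserted from the
    # shortest to the longest. It relies on external `trie`, `slow_find` and
    # `fast_find` helpers not shown in this snippet, and apparently treats the
    # text as 1-indexed (index 0 serves as a guard character).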
text = text + '$'
root = trie.TrieNode("")
link, head = { (root, ""): root }, root
for i in range(n + 1, 0, -1):
        # invariant: link[v][c] = u for internal u and v such that word(u) = c word(v)
v, depth = head, n + 2
while v != root and link.get((v, text[i])) is None:
v, depth = v.parent, depth - len(v.label)
u = link.get((v, text[i]))
if u is None or text[depth] in u.children:
if u is None:
u, remaining = slow_find(root, text[depth - 1:])
else:
u, remaining = slow_find(u, text[depth:])
v, _ = fast_find(v, text[depth:-remaining], False)
depth = len(text) - remaining
if u != root:
link[(v, text[i])] = u
leaf = trie.TrieNode(text[depth:])
u.add_child(leaf)
head = leaf
return root, link
1768082 | while True:
try:
cont = 0
cont2 = 0
calculo = 0
N = int(input())
Votos = (input().split())
for i in range(N):
if (int(Votos[cont])) == 1:
cont2 += 1
cont += 1
calculo = (N / 3) * 2
if cont2 >= calculo:
print("impeachment")
else:
print("acusacao arquivada")
except EOFError:
break
3348475 | # Generated by Django 3.0.3 on 2020-03-10 12:50
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interview_backend', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='company',
name='problems_per_interview',
field=models.IntegerField(default=3, validators=[django.core.validators.MinValueValidator(1)]),
),
]
1737926 | import os, sys
import Image
size = 100, 100
for infile in sys.argv[1:]:
outfile = os.path.splitext(infile)[0] + ".thumbnail"
if infile != outfile:
try:
im = Image.open(infile)
im.thumbnail(size, Image.ANTIALIAS)
im.save(outfile, "JPEG")
except IOError:
print "cannot create thumbnail for '%s'" % infile
# repository: aayla-secura/simple_CORS_https_server
from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='mixnmatchttp',
version='1.0.dev32',
url='https://github.com/aayla-secura/mixnmatchttp',
author='AaylaSecura1138',
author_email='<EMAIL>',
description='Modular HTTP server: Auth, Caching, Proxy, and more',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
packages=find_packages(),
install_requires=[
'future>=0.12',
'wrapt>=1',
'awesomedict',
],
extras_require={
'unix_pwd': ['passlib>=1.7.2'],
'scrypt': ['passlib>=1.7.2', 'scrypt>=0.8.3'],
'bcrypt': ['passlib>=1.7.2', 'bcrypt>=3.1.4'],
'jwt': ['PyJWT[crypto]>=1.7.1'],
'daemon': ['python-daemon>=2.2.4'],
'sql': ['SQLAlchemy>=1.3.16'],
},
zip_safe=False)
# Kate is stuck in a maze. You should help her to find her way out.
# On the first line, you will be given how many rows there are in the maze. On the following n lines, you will be given the maze itself. Here is a legend for the maze:
# • "#" - means a wall; Kate cannot go through there
# • " " - means empty space; Kate can go through there
# • "k" - the initial position of Kate; start looking for a way out from there
# There are two options: Kate either gets out or not:
# • If Kate can get out, print the following:
# "Kate got out in {number_of_moves} moves".
# Note: If there are two or more ways out, she always chooses the longest one.
# • Otherwise, print: "Kate cannot get out".
# Gather the input data
maze = []
paths_list = []
temp_paths_list = []
mz_rows = int(input())
input_data = [input() for _ in range(mz_rows)]
# Handle maze rows of different length (e.g. if trailing spaces on the right are missing)
mz_cols = max([len(data) for data in input_data])
input_data = [data if len(data) == mz_cols else data + ' ' * (mz_cols - len(data)) for data in input_data]
# Fill the maze
for i in range(mz_rows):
maze_row = [data for data in input_data[i]]
maze.append(maze_row)
# Set Kate's position as the start of all paths
temp_paths_list.append([(row,col) for row in range(mz_rows) for col in range(mz_cols) if maze[row][col] == 'k'])
# Generate all possible paths originating from Kate's position
while True:
if not temp_paths_list:
break
for path in temp_paths_list:
row, col = path[-1]
temp_branches = []
if row > 0:
if maze[row - 1][col] == ' ' and (row - 1, col) not in path:
temp_branches.append((row - 1, col))
if row < mz_rows - 1:
if maze[row + 1][col] == ' ' and (row + 1, col) not in path:
temp_branches.append((row + 1, col))
if col > 0:
if maze[row][col - 1] == ' ' and (row, col - 1) not in path:
temp_branches.append((row, col - 1))
if col < mz_cols - 1:
if maze[row][col + 1] == ' ' and (row, col + 1) not in path:
temp_branches.append((row, col + 1))
if not temp_branches:
paths_list.append(path)
temp_paths_list.remove(path)
break
else:
for branch in temp_branches:
temp_path = path.copy()
temp_path.append(branch)
temp_paths_list.append(temp_path)
temp_paths_list.remove(path)
break
# Keep only the paths exiting the maze
paths_to_keep = []
for path in paths_list:
row, col = path[-1]
if row in (0, mz_rows - 1) or col in (0, mz_cols - 1):
paths_to_keep.append(path)
# Print out the outcome
if paths_to_keep:
max_moves = max([len(path) for path in paths_to_keep])
print(f'Kate got out in {max_moves} moves')
else:
    print('Kate cannot get out')
# File: src/psion/oauth2/authentication/methods/__init__.py
from .base import BaseAuthentication
from .client_secret_basic import ClientSecretBasic
from .client_secret_post import ClientSecretPost
from .none import None_
# File: baselines/EMNLP2019/config.py
#MODEL = "simple_pipeline"
#MODEL = "pipeline_without_global"
#MODEL = "best_pipeline"
#MODEL = "ours"
MODEL = "bert"
NUMBER_URI_CANDIDATES = 1 if MODEL == "ours" else 1  # note: both branches are currently 1
NUMBER_URI_CANDIDATES_TO_CONSIDER = 1
URI_THRESHOLD = 0.0
SOFT_COREF_CANDIDATES = MODEL == "ours" or MODEL == "bert"
MULTITASK = True
CANDIDATE_RECALL = False
USE_ENTITY_LINKER = True
USE_BERT = MODEL == "bert"
MODELS_DIR = "models"
KNOWLEDGE_NET_DIR = "../../"
"""This module tests the githubactions module"""
from configator import create_actions_config
def test_create_configator_file_creates_github_actions(mocker):
    """Test that a file is created through github_actions."""
mock_open = mocker.mock_open()
    # Replace the built-in open function with the mock
mocker.patch('builtins.open', mock_open)
mocker.patch("os.makedirs")
create_actions_config.create_configator_file()
mock_open.assert_called_once_with(".github/workflows/grade.yml","w", encoding='utf-8')
# File: examples/application_commands/autocomplete.py
import discpy
from discpy.ext import commands
bot = commands.Bot(command_prefix='>')
# this is the list of items that will be
# shown as choices in autocomplete.
ITEMS = ['Bun', 'Cake', 'Cookie', 'Bread', 'Orange Juice']
# this function autocompletes the option choices. You can use this
# function to do your own processing, for example getting some data from
# a database or from an API, but that is out of scope of this simple example.
async def autocomplete_items(value: str, option: discpy.application.Option, interaction: discpy.Interaction):
if not value:
        # there is a chance the user has not entered any value, i.e. an empty
        # string, in which case we will simply return all the autocomplete items.
ac_items = [discpy.OptionChoice(name=item, value=item) for item in ITEMS]
else:
        # in this case, the user has input something and we will return
        # the items that start with the provided query...
        # lowercase the value because case doesn't matter for us and we don't
        # want autocomplete to break if the value's case differs from the items'.
value = value.lower()
        # now return the items whose names start with
        # the user's entered value. We also lowercase the item name.
ac_items = [discpy.OptionChoice(name=item, value=item) for item in ITEMS if item.lower().startswith(value)]
    # the discord API does not allow showing more than 25 choices in autocomplete, so we return
    # the first 25 items; otherwise it would raise HTTPException.
return ac_items[:25]
# the command that uses autocomplete.
# The autocomplete function can be specified via the `autocomplete` keyword argument.
# Note: autocomplete is only supported for string type options.
@bot.slash_command()
@discpy.application.option('item', autocomplete=autocomplete_items)
async def buy(ctx, item: str):
await ctx.send(f'You bought {item}!')
bot.run('token')
# File: python4kyoani/util.py
def name_for_save(image_path):
return f'pray_{image_path.name}'
from mechanize import Browser
import sys
import os
import re
import subprocess
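# Minimal helper class assumed here: the original script references Directory
# without defining or importing it, so this sketch supplies just the
# attributes the parsing loop below relies on.
class Directory:
    def __init__(self, name):
        self.name = name
        self.files = []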
def get_filelist(url):
    # os.popen3 was removed in Python 3; subprocess.Popen replaces it here.
    child = subprocess.Popen(['rsync', '-r', url], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
    # child = subprocess.Popen(['cat', 'buildservice-repos.txt'], ...)  # debug alternative
    child.stdin.close()
dirs = {}
    for line in child.stdout:
# split line, but take into account that filenames could contain spaces
#
# >>> a = '-rw-r--r-- 4405843968 2007/09/27 17:50:25 distribution/10.3/iso/dvd/openSUSE-10.3-GM- DVD-i386.iso'
# >>> a.split(None, 4)
# ['-rw-r--r--', '4405843968', '2007/09/27', '17:50:25', 'distribution/10.3/iso/dvd/openSUSE-10.3-GM- DVD-i386.iso']
try:
mode, size, date, time, name = line.split(None, 4)
        except ValueError:
            print(repr(line))
            sys.exit(1)
name = name.rstrip()
if mode.startswith('d'):
dirs[name] = Directory(name)
elif mode.startswith('-'):
d, p = os.path.split(name)
if not d:
d = '.'
dirs[d].files.append(p)
elif mode.startswith('l'):
# we ignore symbolic links
continue
else:
# something unknown...
print('skipping', line)
    err = child.stderr.read()
return dirs, err
burl, url = sys.argv[1], sys.argv[2]
#burl_len = len('http://widehat.opensuse.org/')
#burl_len = len('http://opensuse.unixheads.net/')
#burl_len = len('http://download.opensuse.org/pub/opensuse/')
burl_len = len(burl)
br = Browser()
br.open(url)
print('directories:')
for link in br.links(url_regex=re.compile(r"""
^(?!(http|mailto|\?|/))
.*
/$
""", re.X)):
# print (link.url)
print(link.base_url[burl_len:] + link.url)
print()
print('files:')
for link in br.links(url_regex=re.compile(r"""
^(?!(http|mailto|\?|/))
.*
[^/]$
""", re.X)):
# print (link)
print(link.base_url[burl_len:] + link.url)
for line in get_filelist('rsync.opensuse.org::opensuse-updates'):
print(line)
# File: _unittests/ut_datasets/test_geojson.py
# -*- coding: utf-8 -*-
"""
@brief test log(time=13s)
"""
import unittest
from bokeh.models import GeoJSONDataSource
from pyquickhelper.pycode import ExtTestCase
from papierstat.datasets import get_geojson_countries
class TestGeoJSON(ExtTestCase):
def test_geojson(self):
name = get_geojson_countries()
self.assertExists(name)
with open(name, 'r', encoding='utf-8') as f:
geo = GeoJSONDataSource(geojson=f.read())
self.assertTrue(geo is not None)
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python
__doc__ = '''
This module provides a function to write csv results to a file from the
speedtest
'''
__copyright__ = '''
MIT License
Copyright (c) 2018 bandeezy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Author: <NAME>.
# Username: bandeezy
import os
import sys
def write_results_to_csv(data, filename, header=None):
print("Writing results to CSV stored here: {}".format(filename))
# if file doesn't exist, create it with the corresponding header
if not (os.path.isfile(filename)):
if header:
with open(filename, 'w') as out_file:
out_file.write(header + "\n")
else:
            print('Please run internet_speed.py at least once first, as '
                  'this file takes advantage of the speedtest API to '
                  'generate the header.')
sys.exit(1)
with open(filename, 'a') as out_file:
out_file.write(data + "\n")
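# Example usage (hypothetical values; the real header comes from the
# speedtest API output produced by internet_speed.py):
#   write_results_to_csv('2018-06-01 12:00:00,93.4,11.2',
#                        'speedtest_results.csv',
#                        header='timestamp,download_mbps,upload_mbps')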
# Source repo: brosenberg/temple-of-gelmahd
#!/usr/bin/env python
from actors import actors
from actors import player
from combat import combat
from rooms import rooms
from utils import files
from utils import utils
ACTORS = files.load_file("actors.json")
DUNGEON = files.load_file("test-dungeon.json")
ITEMS = files.load_file("items.json")
# TODO: Move save and load to utils.
def _load_game(pc):
print "Specify the path to the save file:"
save_file = raw_input("> ")
try:
with open(save_file, 'r') as f:
pc.load(f.read())
except IOError:
print "Could not load the save game file '%s'" % (save_file,)
return False
return True
def load_game(pc):
loaded = False
while not loaded:
print "Would you like to load your game?"
if utils.get_yesno_input():
loaded = _load_game(pc)
else:
return
def _save_game(pc):
print "Specify the path to the save file:"
save_file = raw_input("> ")
try:
with open(save_file, 'w') as f:
f.write(repr(pc))
except IOError:
print "Could not write save game file '%s'" % (save_file,)
return False
return True
def save_game(pc):
saved = False
while not saved:
print "Would you like to save your game?"
if utils.get_yesno_input():
saved = _save_game(pc)
else:
return
def load_dungeon():
dungeon = {}
for room in DUNGEON:
new_room = rooms.Room()
new_room.load(DUNGEON[room])
dungeon[room] = new_room
return dungeon
def enter_dungeon(pc):
dungeon = load_dungeon()
room = "0"
while True:
print utils.color_text("purple", "-"*80)
print "You are in a %s" % (dungeon[room],)
if dungeon[room].inhabitants:
print "You encounter %s" % (" and ".join(dungeon[room].inhabitants),)
fight(pc, dungeon[room].inhabitants[0])
i = 1
expected = []
for exit in dungeon[room].egress:
print "%s: %s to the %s" % (utils.color_text("green", i), exit[1], exit[2])
expected.append(str(i))
i += 1
print
prompt = "Which door would you like to take?"
s = int(utils.get_expected_input(expected, prompt))-1
room = str(dungeon[room].egress[s][0])
def fight(pc, monster):
monster = actors.load_actor(ACTORS[monster])
fight = combat.Combat(pc, monster)
fight.main_loop()
def main():
pc = player.Player("Bob")
print "You have been banished to the Depths of the Forsaken!"
prompt = "Would you like to %s a new character or %s an old one?\n" % (utils.color_text("green", "create"), utils.color_text("green", "load"))
s = utils.get_expected_input(["create", "load"], prompt)
if s == "load":
loaded = False
while not loaded:
loaded = _load_game(pc)
else:
pc = player.chargen()
save_game(pc)
while True:
# This is awful. Make it cleaner.
print "What would you like to do?"
prompt = "%s character sheet.\n" % (utils.color_text("green", "Show"),)
prompt += "%s or %s the game.\n" % (utils.color_text("green", "Save"), utils.color_text("green", "load"))
prompt += "%s an enemy.\n" % (utils.color_text("green", "Fight"),)
prompt += "Enter the %s\n" % (utils.color_text("green", "Depths"),)
prompt += "%s for a short while. If you have enough experience to level up, you will level up upon resting.\n" % (utils.color_text("green", "Rest"),)
prompt += "%s the game.\n" % (utils.color_text("green", "Quit"),)
s = utils.get_expected_input(["show", "save", "load", "fight", "depths", "rest", "quit"], prompt).lower()
if s == "show":
print pc.character_record()
elif s == "save":
_save_game(pc)
elif s == "load":
_load_game(pc)
elif s == "fight":
fight(pc, "decaying skeleton")
if pc.stats["hp_cur"] < 1:
p = "%s game or %s?\n" % (utils.color_text("green", "Load"), utils.color_text("green", "quit"))
r = utils.get_expected_input(["load", "quit"], p).lower()
if r == "load":
load_game(pc)
else:
break
elif s == "depths":
enter_dungeon(pc)
elif s == "rest":
# This should probably be its own function
pc.stats["ap_cur"] = pc.stats["ap_max"]
pc.stats["hp_cur"] = pc.stats["hp_max"]
pc.stats["sp_cur"] = pc.stats["sp_max"]
pc.stats["fatigue_cur"] = pc.stats["fatigue_max"]
pc.lifespan += 100
pc.rests += 1
if pc.level_up():
print utils.color_text("purple", "You have leveled up! You are now level %d!" % (pc.level,))
elif s == "quit":
break
print "Good bye!"
if __name__ == '__main__':
main()
# Copyright (c) 2019 AT&T Intellectual Property.
# Copyright (c) 2018-2019 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This source code is part of the near-RT RIC (RAN Intelligent Controller)
# platform project (RICP).
#
import queue
import time
from unittest.mock import Mock
import pytest
import ricsdl.backend
from ricsdl.configuration import _Configuration
from ricsdl.configuration import DbBackendType
@pytest.fixture()
def fake_dict_backend_fixture(request):
request.cls.ns = 'some-ns'
request.cls.dm = {'abc': b'1', 'bcd': b'2'}
request.cls.new_dm = {'abc': b'3', 'bcd': b'2'}
request.cls.dm2 = {'cdf': b'4'}
request.cls.remove_dm = {'bcd': b'2'}
request.cls.key = 'abc'
request.cls.keys = ['abc', 'bcd']
request.cls.key2 = ['cdf']
request.cls.old_data = b'1'
request.cls.new_data = b'3'
request.cls.keypattern = r'*bc*'
request.cls.group = 'some-group'
request.cls.groupmember = b'm1'
request.cls.groupmembers = set([b'm1', b'm2'])
    request.cls.new_groupmembers = set([b'm3'])
request.cls.all_groupmembers = request.cls.groupmembers | request.cls.new_groupmembers
request.cls.channels = ['abs', 'gma']
request.cls.channels_and_events = {'abs': ['cbn']}
request.cls.configuration = Mock()
mock_conf_params = _Configuration.Params(db_host=None,
db_port=None,
db_sentinel_port=None,
db_sentinel_master_name=None,
db_cluster_addr_list=None,
db_type=DbBackendType.FAKE_DICT)
request.cls.configuration.get_params.return_value = mock_conf_params
request.cls.db = ricsdl.backend.get_backend_instance(request.cls.configuration)
@pytest.mark.usefixtures('fake_dict_backend_fixture')
class TestFakeDictBackend:
def test_is_connected_function_success(self):
ret = self.db.is_connected()
assert ret is True
def test_set_function_success(self):
self.db.set(self.ns, self.dm)
self.db.set(self.ns, self.dm2)
ret = self.db.get(self.ns, self.keys)
assert ret == self.dm
ret = self.db.get(self.ns, self.key2)
assert ret == self.dm2
def test_set_if_function_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.set_if(self.ns, self.key, self.old_data, self.new_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == self.new_dm
def test_set_if_function_returns_false_if_existing_key_value_not_expected(self):
self.db.set_if(self.ns, self.key, self.old_data, self.new_data)
self.db.set(self.ns, self.new_dm)
ret = self.db.set_if(self.ns, self.key, self.old_data, self.new_data)
assert ret is False
def test_set_if_not_exists_function_success(self):
ret = self.db.set_if_not_exists(self.ns, self.key, self.new_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == {self.key: self.new_data}
def test_set_if_not_exists_function_returns_false_if_key_already_exists(self):
self.db.set(self.ns, self.dm)
ret = self.db.set_if_not_exists(self.ns, self.key, self.new_data)
assert ret is False
def test_find_keys_function_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.find_keys(self.ns, self.keypattern)
assert ret == self.keys
def test_find_keys_function_returns_empty_list_when_no_matching_keys_found(self):
ret = self.db.find_keys(self.ns, self.keypattern)
assert ret == []
def test_find_and_get_function_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.find_and_get(self.ns, self.keypattern)
assert ret == self.dm
def test_find_and_get_function_returns_empty_dict_when_no_matching_keys_exist(self):
ret = self.db.find_and_get(self.ns, self.keypattern)
assert ret == dict()
def test_remove_function_success(self):
self.db.set(self.ns, self.dm)
self.db.remove(self.ns, self.keys)
ret = self.db.get(self.ns, self.keys)
assert ret == dict()
def test_remove_if_function_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.remove_if(self.ns, self.key, self.old_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == self.remove_dm
def test_remove_if_function_returns_false_if_data_does_not_match(self):
ret = self.db.remove_if(self.ns, self.key, self.old_data)
assert ret is False
self.db.set(self.ns, self.dm)
ret = self.db.remove_if(self.ns, self.key, self.new_data)
assert ret is False
def test_add_member_function_success(self):
self.db.add_member(self.ns, self.group, self.groupmembers)
ret = self.db.get_members(self.ns, self.group)
assert ret == self.groupmembers
self.db.add_member(self.ns, self.group, self.new_groupmembers)
ret = self.db.get_members(self.ns, self.group)
assert ret == self.all_groupmembers
def test_remove_member_function_success(self):
self.db.remove_member(self.ns, self.group, self.groupmembers)
self.db.add_member(self.ns, self.group, self.groupmembers)
self.db.remove_member(self.ns, self.group, self.groupmembers)
ret = self.db.get_members(self.ns, self.group)
assert ret == set()
def test_remove_group_function_success(self):
self.db.remove_group(self.ns, self.group)
ret = self.db.get_members(self.ns, self.group)
assert ret == set()
def test_is_member_function_success(self):
ret = self.db.is_member(self.ns, self.group, b'not member')
assert ret is False
self.db.add_member(self.ns, self.group, self.groupmembers)
ret = self.db.is_member(self.ns, self.group, self.groupmember)
assert ret is True
ret = self.db.is_member(self.ns, self.group, b'not member')
assert ret is False
def test_group_size_function_success(self):
ret = self.db.group_size(self.ns, self.group)
assert ret == 0
self.db.add_member(self.ns, self.group, self.groupmembers)
ret = self.db.group_size(self.ns, self.group)
assert ret == len(self.groupmembers)
def test_fake_dict_backend_object_string_representation(self):
assert str(self.db) == str({'DB type': 'FAKE DB'})
def test_set_and_publish_function_success(self):
self.db.set_and_publish(self.ns, self.channels_and_events, self.dm)
ret = self.db.get(self.ns, self.keys)
assert ret == self.dm
assert self.db._queue.qsize() == 1
def test_set_if_and_publish_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.set_if_and_publish(self.ns, self.channels_and_events, self.key, self.old_data,
self.new_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == self.new_dm
assert self.db._queue.qsize() == 1
def test_set_if_and_publish_returns_false_if_existing_key_value_not_expected(self):
self.db.set_if_and_publish(self.ns, self.channels_and_events, self.key, self.old_data,
self.new_data)
self.db.set(self.ns, self.new_dm)
ret = self.db.set_if(self.ns, self.key, self.old_data, self.new_data)
assert ret is False
assert self.db._queue.qsize() == 0
def test_set_if_not_exists_and_publish_success(self):
ret = self.db.set_if_not_exists_and_publish(self.ns, self.channels_and_events, self.key,
self.new_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == {self.key: self.new_data}
assert self.db._queue.qsize() == 1
def test_set_if_not_exists_and_publish_returns_false_if_key_already_exists(self):
self.db.set(self.ns, self.dm)
ret = self.db.set_if_not_exists_and_publish(self.ns, self.channels_and_events, self.key,
self.new_data)
assert ret is False
assert self.db._queue.qsize() == 0
def test_remove_and_publish_function_success(self):
self.db.set(self.ns, self.dm)
self.db.remove_and_publish(self.ns, self.channels_and_events, self.keys)
ret = self.db.get(self.ns, self.keys)
assert ret == dict()
assert self.db._queue.qsize() == 1
def test_remove_if_and_publish_success(self):
self.db.set(self.ns, self.dm)
ret = self.db.remove_if_and_publish(self.ns, self.channels_and_events, self.key,
self.old_data)
assert ret is True
ret = self.db.get(self.ns, self.keys)
assert ret == self.remove_dm
assert self.db._queue.qsize() == 1
def test_remove_if_and_publish_returns_false_if_data_does_not_match(self):
ret = self.db.remove_if_and_publish(self.ns, self.channels_and_events, self.key,
self.old_data)
assert ret is False
self.db.set(self.ns, self.dm)
ret = self.db.remove_if_and_publish(self.ns, self.channels_and_events, self.key,
self.new_data)
assert ret is False
assert self.db._queue.qsize() == 0
def test_remove_all_publish_success(self):
self.db.set(self.ns, self.dm)
self.db.remove_all_and_publish(self.ns, self.channels_and_events)
ret = self.db.get(self.ns, self.keys)
assert ret == dict()
assert self.db._queue.qsize() == 1
def test_subscribe_channel_success(self):
cb = Mock()
self.db.subscribe_channel(self.ns, cb, self.channels)
for channel in self.channels:
assert self.db._channel_cbs.get(channel, None)
assert not self.db._listen_thread.is_alive()
def test_subscribe_channel_event_loop_success(self):
cb = Mock()
self.db.start_event_listener()
self.db.subscribe_channel(self.ns, cb, self.channels)
for channel in self.channels:
assert self.db._channel_cbs.get(channel, None)
assert self.db._listen_thread.is_alive()
def test_unsubscribe_channel_success(self):
self.db.subscribe_channel(self.ns, Mock(), self.channels)
self.db.unsubscribe_channel(self.ns, [self.channels[0]])
assert self.db._channel_cbs.get(self.channels[0], None) is None
assert self.db._channel_cbs.get(self.channels[1], None)
def test_listen(self):
cb = Mock()
self.db.start_event_listener()
self.db.subscribe_channel(self.ns, cb, self.channels)
self.db._queue.put(("abs", "cbn"))
time.sleep(0.5)
assert self.db._queue.qsize() == 0
def test_start_event_listener_success(self):
self.db.start_event_listener()
assert self.db._run_in_thread
def test_start_event_listener_subscribe_first(self):
self.db._listen_thread.start = Mock()
mock_cb = Mock()
self.db._channel_cbs = {'abs': mock_cb}
self.db.subscribe_channel(self.ns, Mock(), self.channels)
self.db.start_event_listener()
self.db._listen_thread.start.assert_called_once()
def test_start_event_listener_fail(self):
self.db._listen_thread.is_alive = Mock()
self.db._listen_thread.is_alive.return_value = True
with pytest.raises(Exception):
self.db.start_event_listener()
def test_handle_events_success(self):
self.db._queue = Mock()
self.db._queue.get.return_value = ('abs', 'cbn')
mock_cb = Mock()
self.db._channel_cbs = {'abs': mock_cb}
assert self.db.handle_events() == ('abs', 'cbn')
mock_cb.assert_called_once_with('abs', 'cbn')
def test_handle_events_success_no_notification(self):
self.db._queue = Mock()
self.db._queue.get.side_effect = queue.Empty
assert self.db.handle_events() is None
def test_handle_events_fail_already_started(self):
self.db._listen_thread = Mock()
self.db._listen_thread.is_alive.return_value = True
with pytest.raises(Exception):
self.db.handle_events()
def test_handle_events_fail_already_set(self):
self.db._run_in_thread = True
with pytest.raises(Exception):
self.db.handle_events()
@pytest.fixture()
def fake_dict_backend_lock_fixture(request):
request.cls.ns = 'some-ns'
request.cls.lockname = 'some-lock-name'
request.cls.expiration = 10
request.cls.retry_interval = 0.1
request.cls.retry_timeout = 1
request.cls.configuration = Mock()
mock_conf_params = _Configuration.Params(db_host=None,
db_port=None,
db_sentinel_port=None,
db_sentinel_master_name=None,
db_cluster_addr_list=None,
db_type=DbBackendType.FAKE_DICT)
request.cls.configuration.get_params.return_value = mock_conf_params
request.cls.lock = ricsdl.backend.get_backend_lock_instance(request.cls.configuration,
request.cls.ns,
request.cls.lockname,
request.cls.expiration,
Mock())
@pytest.mark.usefixtures('fake_dict_backend_lock_fixture')
class TestFakeDictBackendLock:
def test_acquire_function_success(self):
ret = self.lock.acquire(self.retry_interval, self.retry_timeout)
assert ret is True
def test_acquire_function_returns_false_if_lock_is_not_acquired(self):
self.lock.acquire(self.retry_interval, self.retry_timeout)
ret = self.lock.acquire(self.retry_interval, self.retry_timeout)
assert ret is False
def test_release_function_success(self):
self.lock.acquire(self.retry_interval, self.retry_timeout)
ret = self.lock.acquire(self.retry_interval, self.retry_timeout)
assert ret is False
self.lock.release()
ret = self.lock.acquire(self.retry_interval, self.retry_timeout)
assert ret is True
def test_get_validity_time_function_success(self):
ret = self.lock.get_validity_time()
assert ret == self.expiration
def test_fake_dict_backend_lock_object_string_representation(self):
expected_lock_info = {'lock DB type': 'FAKE DB',
'lock namespace': 'some-ns',
'lock name': 'some-lock-name',
'lock status': 'unlocked'}
assert str(self.lock) == str(expected_lock_info)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/image_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_imageDialog(object):
def setupUi(self, imageDialog):
imageDialog.setObjectName("imageDialog")
imageDialog.resize(400, 300)
imageDialog.setMaximumSize(QtCore.QSize(400, 300))
imageDialog.setSizeGripEnabled(False)
imageDialog.setModal(False)
self.buttonBox = QtWidgets.QDialogButtonBox(imageDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.retranslateUi(imageDialog)
self.buttonBox.accepted.connect(imageDialog.accept)
self.buttonBox.rejected.connect(imageDialog.reject)
QtCore.QMetaObject.connectSlotsByName(imageDialog)
def retranslateUi(self, imageDialog):
_translate = QtCore.QCoreApplication.translate
imageDialog.setWindowTitle(_translate("imageDialog", "Image Options"))
from django.conf.urls import url
from states.api.views import (
StateAPIView,
StateDetailsAPIView,
)
urlpatterns = [
url(r'^$', StateAPIView.as_view(), name='states'),
url(r'^(?P<pk>[\w.@+-]+)/$', StateDetailsAPIView.as_view(), name='state-details'),
]
# File: tests/st/ops/cpu/test_arithmetic_op.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class SubNet(nn.Cell):
def __init__(self):
super(SubNet, self).__init__()
self.sub = P.Sub()
def construct(self, x, y):
return self.sub(x, y)
class DivNet(nn.Cell):
def __init__(self):
super(DivNet, self).__init__()
self.div = P.Div()
def construct(self, x, y):
return self.div(x, y)
class FloorDivNet(nn.Cell):
def __init__(self):
super(FloorDivNet, self).__init__()
self.floor_div = P.FloorDiv()
def construct(self, x, y):
return self.floor_div(x, y)
class ModNet(nn.Cell):
def __init__(self):
super(ModNet, self).__init__()
self.mod = P.Mod()
def construct(self, x, y):
return self.mod(x, y)
class FloorModNet(nn.Cell):
def __init__(self):
super(FloorModNet, self).__init__()
self.floor_mod = P.FloorMod()
def construct(self, x, y):
return self.floor_mod(x, y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sub():
x = np.random.rand(2, 3, 4, 4).astype(np.float32)
y = np.random.rand(4, 1).astype(np.float32)
net = SubNet()
output = net(Tensor(x), Tensor(y, mindspore.float32))
expect_output = x - y
assert np.all(output.asnumpy() == expect_output)
# float64
x = np.random.rand(2, 3, 4, 4).astype(np.float64)
y = np.random.rand(4, 1).astype(np.float64)
net = SubNet()
output = net(Tensor(x), Tensor(y, mindspore.float64))
expect_output = x - y
assert np.all(output.asnumpy() == expect_output)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_div():
prop = 1 if np.random.random() < 0.5 else -1
x0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x1_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y1_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float32) * prop
x2_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.float16) * prop
y2_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float16) * prop
x3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
y3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
x4_np = np.array(768).astype(np.float32) * prop
y4_np = np.array(3072.5).astype(np.float32) * prop
x5_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int32) * prop
y5_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
x6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
y6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x7_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int64) * prop
y7_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int64) * prop
x8_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float64) * prop
y8_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float64) * prop
x0 = Tensor(x0_np)
y0 = Tensor(y0_np)
x1 = Tensor(x1_np)
y1 = Tensor(y1_np)
x2 = Tensor(x2_np)
y2 = Tensor(y2_np)
x3 = Tensor(x3_np)
y3 = Tensor(y3_np)
x4 = Tensor(x4_np)
y4 = Tensor(y4_np)
x5 = Tensor(x5_np)
y5 = Tensor(y5_np)
x6 = Tensor(x6_np)
y6 = Tensor(y6_np)
x7 = Tensor(x7_np)
y7 = Tensor(y7_np)
x8 = Tensor(x8_np)
y8 = Tensor(y8_np)
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
div = DivNet()
output0 = div(x0, y0)
expect0 = np.divide(x0_np, y0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = div(x1, y1)
expect1 = np.divide(x1_np, y1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
output2 = div(x2, y2)
expect2 = np.divide(x2_np, y2_np).astype(np.float16)
diff2 = output2.asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output2.shape == expect2.shape
output3 = div(x3, y3)
expect3 = np.divide(x3_np, y3_np)
diff3 = output3.asnumpy() - expect3
error3 = np.ones(shape=expect3.shape) * 1.0e-5
assert np.all(diff3 < error3)
assert output3.shape == expect3.shape
output4 = div(x4, y4)
expect4 = np.divide(x4_np, y4_np)
diff4 = output4.asnumpy() - expect4
error4 = np.ones(shape=expect4.shape) * 1.0e-5
assert np.all(diff4 < error4)
assert output4.shape == expect4.shape
output5 = div(x5, y5)
expect5 = x5_np // y5_np
assert np.all(output5.asnumpy() == expect5)
output6 = div(x6, y6)
expect6 = np.divide(x6_np, y6_np)
diff6 = output6.asnumpy() - expect6
error6 = np.ones(shape=expect6.shape) * 1.0e-5
assert np.all(diff6 < error6)
assert output6.shape == expect6.shape
output7 = div(x7, y7)
expect7 = np.divide(x7_np, y7_np).astype(np.int64)
assert np.all(output7.asnumpy() == expect7)
assert output7.shape == expect7.shape
output8 = div(x8, y8)
expect8 = np.divide(x8_np, y8_np)
diff8 = output8.asnumpy() - expect8
error8 = np.ones(shape=expect8.shape) * 1.0e-7
assert np.all(diff8 < error8)
assert output8.shape == expect8.shape
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_div():
prop = 1 if np.random.random() < 0.5 else -1
x0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y0_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float32) * prop
x1_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.float16) * prop
y1_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float16) * prop
x2_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int32) * prop
y2_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
x3_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
y3_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x4_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int64) * prop
y4_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int64) * prop
x5_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float64) * prop
y5_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float64) * prop
x0 = Tensor(x0_np)
y0 = Tensor(y0_np)
x1 = Tensor(x1_np)
y1 = Tensor(y1_np)
x2 = Tensor(x2_np)
y2 = Tensor(y2_np)
x3 = Tensor(x3_np)
y3 = Tensor(y3_np)
x4 = Tensor(x4_np)
y4 = Tensor(y4_np)
x5 = Tensor(x5_np)
y5 = Tensor(y5_np)
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
floor_div = FloorDivNet()
output0 = floor_div(x0, y0)
expect0 = np.floor_divide(x0_np, y0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = floor_div(x1, y1)
expect1 = np.floor_divide(x1_np, y1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
output2 = floor_div(x2, y2)
expect2 = np.floor_divide(x2_np, y2_np).astype(np.float16)
diff2 = output2.asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output2.shape == expect2.shape
output3 = floor_div(x3, y3)
expect3 = np.floor_divide(x3_np, y3_np)
diff3 = output3.asnumpy() - expect3
error3 = np.ones(shape=expect3.shape) * 1.0e-5
assert np.all(diff3 < error3)
assert output3.shape == expect3.shape
output4 = floor_div(x4, y4)
expect4 = np.floor_divide(x4_np, y4_np)
diff4 = output4.asnumpy() - expect4
error4 = np.ones(shape=expect4.shape) * 1.0e-5
assert np.all(diff4 < error4)
assert output4.shape == expect4.shape
output5 = floor_div(x5, y5)
expect5 = np.floor_divide(x5_np, y5_np)
diff5 = output5.asnumpy() - expect5
error5 = np.ones(shape=expect5.shape) * 1.0e-7
assert np.all(diff5 < error5)
assert output5.shape == expect5.shape
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
prop = 1 if np.random.random() < 0.5 else -1
x0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x1_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y1_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float32) * prop
x2_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.float16) * prop
y2_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float16) * prop
x3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
y3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
x4_np = np.array(768).astype(np.float32) * prop
y4_np = np.array(3072.5).astype(np.float32) * prop
x5_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int32) * prop
y5_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
x6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
y6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x7_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int64) * prop
y7_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int64) * prop
x0 = Tensor(x0_np)
y0 = Tensor(y0_np)
x1 = Tensor(x1_np)
y1 = Tensor(y1_np)
x2 = Tensor(x2_np)
y2 = Tensor(y2_np)
x3 = Tensor(x3_np)
y3 = Tensor(y3_np)
x4 = Tensor(x4_np)
y4 = Tensor(y4_np)
x5 = Tensor(x5_np)
y5 = Tensor(y5_np)
x6 = Tensor(x6_np)
y6 = Tensor(y6_np)
x7 = Tensor(x7_np)
y7 = Tensor(y7_np)
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
mod = ModNet()
output0 = mod(x0, y0)
expect0 = np.mod(x0_np, y0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = mod(x1, y1)
expect1 = np.mod(x1_np, y1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
output2 = mod(x2, y2)
expect2 = np.mod(x2_np, y2_np).astype(np.float16)
diff2 = output2.asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output2.shape == expect2.shape
output3 = mod(x3, y3)
expect3 = np.mod(x3_np, y3_np)
diff3 = output3.asnumpy() - expect3
error3 = np.ones(shape=expect3.shape) * 1.0e-5
assert np.all(diff3 < error3)
assert output3.shape == expect3.shape
output4 = mod(x4, y4)
expect4 = np.mod(x4_np, y4_np)
diff4 = output4.asnumpy() - expect4
error4 = np.ones(shape=expect4.shape) * 1.0e-5
assert np.all(diff4 < error4)
assert output4.shape == expect4.shape
output5 = mod(x5, y5)
expect5 = np.mod(x5_np, y5_np)
assert np.all(output5.asnumpy() == expect5)
assert output5.shape == expect5.shape
output6 = mod(x6, y6)
expect6 = np.mod(x6_np, y6_np)
diff6 = output6.asnumpy() - expect6
error6 = np.ones(shape=expect6.shape) * 1.0e-5
assert np.all(diff6 < error6)
assert output6.shape == expect6.shape
output7 = mod(x7, y7)
expect7 = np.mod(x7_np, y7_np).astype(np.int64)
assert np.all(output7.asnumpy() == expect7)
    assert output7.shape == expect7.shape
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_mod():
prop = 1 if np.random.random() < 0.5 else -1
x0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y0_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x1_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
y1_np = np.random.randint(1, 100, (2, 1, 4, 4)).astype(np.float32) * prop
x2_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.float16) * prop
y2_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float16) * prop
x3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
y3_np = np.random.randint(1, 100, 1).astype(np.float32) * prop
x4_np = np.array(768).astype(np.float32) * prop
y4_np = np.array(3072.5).astype(np.float32) * prop
x5_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int32) * prop
y5_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
x6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int32) * prop
y6_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.float32) * prop
x7_np = np.random.randint(1, 100, (2, 1, 1, 4)).astype(np.int64) * prop
y7_np = np.random.randint(1, 100, (2, 3, 4, 4)).astype(np.int64) * prop
x0 = Tensor(x0_np)
y0 = Tensor(y0_np)
x1 = Tensor(x1_np)
y1 = Tensor(y1_np)
x2 = Tensor(x2_np)
y2 = Tensor(y2_np)
x3 = Tensor(x3_np)
y3 = Tensor(y3_np)
x4 = Tensor(x4_np)
y4 = Tensor(y4_np)
x5 = Tensor(x5_np)
y5 = Tensor(y5_np)
x6 = Tensor(x6_np)
y6 = Tensor(y6_np)
x7 = Tensor(x7_np)
y7 = Tensor(y7_np)
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
floor_mod = FloorModNet()
output0 = floor_mod(x0, y0)
expect0 = np.mod(x0_np, y0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = floor_mod(x1, y1)
expect1 = np.mod(x1_np, y1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
output2 = floor_mod(x2, y2)
expect2 = np.mod(x2_np, y2_np).astype(np.float16)
diff2 = output2.asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert output2.shape == expect2.shape
output3 = floor_mod(x3, y3)
expect3 = np.mod(x3_np, y3_np)
diff3 = output3.asnumpy() - expect3
error3 = np.ones(shape=expect3.shape) * 1.0e-5
assert np.all(diff3 < error3)
assert output3.shape == expect3.shape
output4 = floor_mod(x4, y4)
expect4 = np.mod(x4_np, y4_np)
diff4 = output4.asnumpy() - expect4
error4 = np.ones(shape=expect4.shape) * 1.0e-5
assert np.all(diff4 < error4)
assert output4.shape == expect4.shape
output5 = floor_mod(x5, y5)
expect5 = np.mod(x5_np, y5_np)
assert np.all(output5.asnumpy() == expect5)
assert output5.shape == expect5.shape
output6 = floor_mod(x6, y6)
expect6 = np.mod(x6_np, y6_np)
diff6 = output6.asnumpy() - expect6
error6 = np.ones(shape=expect6.shape) * 1.0e-5
assert np.all(diff6 < error6)
assert output6.shape == expect6.shape
output7 = floor_mod(x7, y7)
expect7 = np.mod(x7_np, y7_np).astype(np.int64)
assert np.all(output7.asnumpy() == expect7)
    assert output7.shape == expect7.shape
test_sub()
test_div()
test_floor_div()
test_mod()
test_floor_mod()
import os
import pytest
import time
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.utils.translation import gettext as _
from pretix.presale.style import regenerate_css
from ..utils import screenshot
@pytest.fixture
def items(event, tax_rule):
i1 = event.items.create(name=_('Business Ticket'), default_price=400, admission=True, tax_rule=tax_rule,
active=True, position=2)
i2 = event.items.create(name=_('Individual Ticket'), default_price=250, admission=True, tax_rule=tax_rule,
active=True, position=1)
i3 = event.items.create(name=_('VIP Ticket'), default_price=600, admission=True, tax_rule=tax_rule,
active=True, position=3)
c = event.categories.create(name=_('Merchandise'))
i4 = event.items.create(name=_('T-Shirt'), default_price=25, admission=True, tax_rule=tax_rule,
active=True, category=c)
v1 = i4.variations.create(value=_('S'))
v2 = i4.variations.create(value=_('M'))
v4 = i4.variations.create(value=_('L'), default_price=30)
wc = event.categories.create(name=_('Workshops'))
wc1 = event.items.create(name=_('Workshop session: Digital future'), default_price=12, active=True, category=wc)
wc2 = event.items.create(name=_('Workshop session: Analog future'), default_price=12, active=True, category=wc)
i1.addons.create(addon_category=wc, min_count=0, max_count=2)
q1 = event.quotas.create(name=_('Available'), size=100)
q1.items.add(i1)
q1.items.add(i2)
q1.items.add(i4)
q1.items.add(wc1)
q1.items.add(wc2)
q1.variations.add(v1)
q1.variations.add(v2)
q1.variations.add(v4)
q2 = event.quotas.create(name=_('Unavailable'), size=0)
q2.items.add(i3)
return [i1, i2, i3, i4, wc1, wc2]
SCREEN = None
@pytest.yield_fixture(params=["wide", "desktop", "mobile"])
def chrome_options(request, chrome_options):
global SCREEN
SCREEN = request.param
chrome_options._arguments.remove('window-size=1366x768')
chrome_options.add_argument('headless')
if SCREEN == "wide":
chrome_options.add_argument('window-size=1920x1080')
elif SCREEN == "desktop":
chrome_options.add_argument('window-size=1024x768')
elif SCREEN == "mobile":
chrome_options.add_experimental_option('mobileEmulation', {'deviceName': 'Pixel 2'})
try:
yield chrome_options
finally:
SCREEN = None
return chrome_options
@pytest.yield_fixture(params=["stock", "custom_round", "custom_sharp"])
def color_opts(request, event):
if request.param == "custom_round":
event.settings.primary_color = '#ed0808'
event.settings.theme_color_background = '#b20707'
regenerate_css.apply(args=(event.pk,))
elif request.param == "custom_sharp":
event.settings.primary_color = '#ed0808'
event.settings.theme_color_background = '#000000'
event.settings.theme_round_borders = False
regenerate_css.apply(args=(event.pk,))
return request.param
@pytest.yield_fixture(params=["nolink", "link"])
def organizer_link_back(request, event):
event.settings.organizer_link_back = (request.param == "link")
return request.param
@pytest.yield_fixture(params=["en", "de,en", "ar,de,en,fr"])
def lang_opts(request, event):
event.settings.locales = request.param.split(',')
event.settings.locale = request.param.split(',')[0]
return request.param
@pytest.yield_fixture(params=["largeheader_title", "largeheader", "smallheader_title", "smallheader", "logo", "logo_title", "title"])
def pic_opts(request, event):
if "largeheader" in request.param:
value = open(os.path.join(os.path.dirname(__file__), "../../assets/eventheader_large.jpg"), "rb")
newname = default_storage.save('logo.jpg', ContentFile(value.read()))
event.settings.logo_image = 'file://' + newname
event.settings.logo_image_large = True
elif "smallheader" in request.param:
value = open(os.path.join(os.path.dirname(__file__), "../../assets/eventheader_small.jpg"), "rb")
newname = default_storage.save('logo.jpg', ContentFile(value.read()))
event.settings.logo_image = 'file://' + newname
elif "logo" in request.param:
value = open(os.path.join(os.path.dirname(__file__), "../../assets/ticketshoplive_logo.png"), "rb")
newname = default_storage.save('logo.jpg', ContentFile(value.read()))
event.settings.logo_image = 'file://' + newname
return request.param
@pytest.mark.django_db
def shot_shop_frontpage(live_server, organizer, event, items, color_opts, lang_opts, pic_opts,
client, organizer_link_back):
event.live = True
event.save()
event.settings.waiting_list_enabled = True
event.settings.waiting_list_auto = True
client.get(live_server.url + '/{}/{}/'.format(
organizer.slug, event.slug
))
client.find_element_by_css_selector("button[data-toggle=variations]")
client.execute_script("window.scrollTo(0, 0)")
time.sleep(1)
screenshot(client, 'style/shop_frontpage_{}.png'.format('_'.join([
SCREEN, color_opts, pic_opts, lang_opts, organizer_link_back
])))
# -*- coding: utf-8 -*-
"""Enigma Machine Simulator.
This module is a Python implementation of the Enigma Machine.
Ran as a script it starts a simulation and allows a user to configure and use
their own Enigma Machine.
"""
import string
from time import sleep
class EnigmaMachine:
"""
A class to represent an Enigma Machine.
It comprises a set of rotors (usually three), a reflector, and an optional
plugboard.
Attributes
----------
rotors : lst
List of Enigma Rotors (list of EnigmaMachine.Rotor() object)
plugboard : EnigmaMachine.Plugboard()
Plugboard object
reflector : EnigmaMachine.Reflector()
Reflector object
"""
def __init__(self,
# Default settings for Enigma Mk I.
rotor_types=['I', 'II', 'III'],
rotor_positions='DEF',
ring_settings='ABC',
reflector_mapping='B',
steckered_pairing='AM FI NV PS TU WZ'):
"""
Initialises an EnigmaMachine with rotors, reflector and plugboard.
Args:
rotor_types (lst): List of types of rotor in the machine
rotor_positions (str): Initial positions of the rotors
ring_settings (str): Ring settings of the rotors
reflector_mapping (str): Requesite information to initialise a
EnigmaMachine.Reflector object
steckered_pairing (str): Requesite information to initialise a
EnigmaMachine.Plugboard object. If False
then plugboard is set at ""
(which is equivalent to no plugboard).
"""
# Initialise rotors from config.
if len(rotor_types) != len(ring_settings):
raise ValueError('Number of ring settings must match with number '
'of rotors')
elif len(rotor_types) != len(rotor_positions):
raise ValueError('Number of rotor positions must match with '
'number of rotors')
self.rotors = []
for i in range(len(rotor_types)):
self.rotors.append(EnigmaMachine.Rotor(
rotor_type=rotor_types[i],
position=rotor_positions[i],
ring_setting=ring_settings[i]))
# Provides the option of having no plugboard.
if not steckered_pairing:
steckered_pairing = ''
self.plugboard = EnigmaMachine.Plugboard(steckered_pairing)
self.reflector = EnigmaMachine.Reflector(reflector_mapping)
def __repr__(self):
"""
String representation of an EnigmaMachine used for debugging.
"""
        return (f'EnigmaMachine(rotors={self.rotors}, '
                f'plugboard={self.plugboard}, reflector={self.reflector})')
def __str__(self):
"""
String representation of an EnigmaMachine used for printing to console.
"""
if self.plugboard.steckered_pairing == 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
pb_str = 'no plugboard'
else:
pb_str = 'a plugboard'
return (f'Enigma Machine with {len(self.rotors)} rotors, a reflector'
f' and {pb_str}.')
@staticmethod
def letter_to_number(letter):
"""
        Converts a letter to its position in the alphabet as an integer from 0 to 25.
Arguments:
letter (str): Letter to convert.
Returns:
number (int): Integer between 0 and 25 corresponding to letter's
place in the alphabet.
Raises:
ValueError if input is not a letter.
"""
if type(letter) != str:
raise ValueError('Input not a string')
if not letter.isalpha() or len(letter) != 1:
raise ValueError('Input should be a single letter')
letter = str.upper(letter)
number = string.ascii_uppercase.index(letter)
return number
@staticmethod
def number_to_letter(number):
"""
Finds the letter from the alphabet corresponding to number.
Example: number_to_letter(0) = 'A'
Arguments:
number (int): Place in alphabet of letter to find.
Returns:
letter (int): Upper case letter of alphabet.
Raises:
ValueError if input is not a number between 0 and 25.
"""
if type(number) != int or not 0 <= number < 26:
raise ValueError('Input should be an integer between 0 and 25')
letter = chr(number + 65)
return letter
@staticmethod
def caeser_shift(letter, n):
"""
Shifts a letter up the alphabet by n. (Known as a "Caeser Shift").
Example: caeser_shift('L', 2) = 'N'
Arguments:
letter (str): Letter to transpose.
n (int): Number of places to shift by.
Returns:
shifted_letter (int): Letter after Caeser Shift has been performed.
Raises:
ValueError if input is not a letter.
"""
if type(letter) != str:
raise ValueError('Input should be a string')
elif not (letter.isalpha() and len(letter) == 1):
raise ValueError('Input should be a single letter')
letter = str.upper(letter)
shifted_letter = chr((ord(letter) - 65 + n) % 26 + 65)
return shifted_letter
def turn_rotor_assembly(self):
"""
Turns the rotor assembly as a whole. This is performed by firstly
rotating the right-most rotor one place. The other rotors are then
rotated if the rotor to their immediate right was in its "notch"
position. This is calcuated for each rotor in the assembly.
Example: Rotor 1 has position 'A' and notch at 'G'
Rotor 2 has position 'B' and notch at 'H'
Rotor 3 has position 'E' and notch at 'E'
Then Rotor 3 is turned automatically, but as it was in its notch
position (notch == position) then Rotor 2 is also turned. Rotor 1 is
not turned because Rotor 2 was not at its notch position.
"""
for i in range(len(self.rotors) - 1):
rotor = self.rotors[i]
adjacent_rotor = self.rotors[i + 1]
if adjacent_rotor.notch == adjacent_rotor.position:
rotor.turn_rotor()
if i < len(self.rotors) - 2:
adjacent_rotor.turn_rotor()
# Last rotor always turns.
self.rotors[-1].turn_rotor()
def press_key(self, letter):
"""
Emulates the pressing of a key on the Enigma keyboard. Every time a
key is pressed, the rotor assembly is turned. This is one of the key
properties of the EnigmaMachine.
Arguments:
letter (str): Letter pressed.
Returns:
encrypted_letter (str): The letter that would be lit on the Enigma
lampboard. after pressing the "letter" key.
(In other words, what "letter" was encrypted to).
Raises:
ValueError if input is not a letter of length one.
(You can't press two keys at once otherwise you'll be disciplined
by your senior officer in the Wehrmacht!).
"""
if type(letter) != str or len(letter) != 1:
raise ValueError
# As soon as a letter is pressed, the rotor assembly turns.
self.turn_rotor_assembly()
        # First, the current goes through the plugboard.
        encrypted_letter = self.plugboard.map_letter(letter)
# The current flows through the rotors in reverse order to begin with
# (so right-most rotor first moving leftwards).
for rotor in reversed(self.rotors):
encrypted_letter = rotor.map_letter(encrypted_letter)
# Current now passes through the reflector.
encrypted_letter = self.reflector.map_letter(encrypted_letter)
# Current now passes back through the rotors, but starting from the
# left this time (so rotor 1 -> rotor 2 -> rotor 3...).
for rotor in self.rotors:
encrypted_letter = rotor.map_letter(encrypted_letter,
reverse=True)
# Current finally flows back through the plugboard and lights up a
# character to show you the encrypted letter. Exciting!
encrypted_letter = self.plugboard.map_letter(encrypted_letter)
return encrypted_letter
def encrypt_message(self, message):
"""
Encrypts a message using the EnigmaMachine by consecutively pressing
each key of the message to be encrypted. It skips any characters that
aren't alphabetical such as punctuation, numbers etc.
Arguments:
message (str): Message to be encrypted.
Returns:
encrypted_message (str): The encrypted message.
Raises:
ValueError if input is not a string.
"""
if type(message) != str:
raise ValueError
encrypted_message = ''
for letter in message:
if letter.isalpha():
encrypted_message = encrypted_message + self.press_key(letter)
else:
encrypted_message = encrypted_message + letter
return encrypted_message
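    # Illustrative usage sketch (the Enigma cipher is self-reciprocal, so a
    # second machine built with identical settings decrypts the message):
    #   machine = EnigmaMachine()
    #   cipher = machine.encrypt_message('HELLO WORLD')
    #   EnigmaMachine().encrypt_message(cipher)  # -> 'HELLO WORLD'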
class Rotor:
"""
A class to represent an Enigma Rotor.
A rotor has a letter mapping, a position, a ring setting and a notch.
The letter mapping is essentially the internal wiring of the rotor, it
tells you which letter is mapped to which.
The position is one of 26 rotary positions of the rotor in the machine.
If you turn the rotor, then this position is changed.
Ring settings are similar to the position, but they affect the position
of the internal wiring of the rotor, relative to the rotor itself.
The notch is a mechanical device which can turn the rotor immediately
to the left of this rotor.
Attributes:
rotor_type: (str)
A Roman Numeral expressing the rotor type (I-V).
position: (str)
A letter of the alphabet denoting the position of the rotor.
ring_setting: (str)
A letter of the alphabet denoting the ring setting of the
rotor.
notch: (str)
A letter of the alphabet denoting the position of the notch
of the rotor.
"""
def __init__(self, rotor_type='I', position='A', ring_setting='A'):
"""
Initialises a Rotor from an EnigmaMachine. Also updates the mapping
with the appropriate shift from the rotor's ring setting.
            Args:
                rotor_type: (str)
                    A Roman numeral (I-V) selecting the rotor's wiring and
                    notch position.
                position: (str)
                    A letter of the alphabet denoting the position of the
                    rotor.
                ring_setting: (str)
                    A letter of the alphabet denoting the ring setting of the
                    rotor.
            Raises:
                ValueError if rotor_type is not one of I-V.
                ValueError if position or ring_setting is not an alphabetic
                    character of length one.
"""
if rotor_type == 'I':
self.mapping = 'EKMFLGDQVZNTOWYHXUSPAIBRCJ'
self.notch = 'Q'
elif rotor_type == 'II':
self.mapping = 'AJDKSIRUXBLHWTMCQGZNPYFVOE'
self.notch = 'E'
elif rotor_type == 'III':
self.mapping = 'BDFHJLCPRTXVZNYEIWGAKMUSQO'
self.notch = 'V'
elif rotor_type == 'IV':
self.mapping = 'ESOVPZJAYQUIRHXLNFTGKDCMWB'
self.notch = 'J'
elif rotor_type == 'V':
self.mapping = 'VZBRGITYUPSDNHLXAWMJQOFECK'
self.notch = 'Z'
else:
raise ValueError('Must choose a rotor type I - V')
self.position = position
self.ring_setting = ring_setting
for rotor_item in [position, ring_setting]:
if type(rotor_item) != str:
raise ValueError('Attribute must be a string')
elif not rotor_item.isalpha():
raise ValueError('Attribute must be alphabetic')
elif len(rotor_item) != 1:
raise ValueError('Attribute must have length 1')
# Apply the ring setting to the rotor which changes its mapping.
self.apply_ring_setting(ring_setting)
def __str__(self):
"""
String representation of a Rotor used for printing to console.
"""
return (f'A Rotor for an Enigma Machine with letter mapping "'
f'{self.mapping}", ring setting "{self.ring_setting}", '
f'position "{self.position}", and a '
f'notch at position "{self.notch}"')
def turn_rotor(self):
"""
Turns the rotor.
Example: If a rotor is in position "F" then after turning
it will be in position "G".
"""
# Translate letter position to number.
position_nbr = EnigmaMachine.letter_to_number(self.position)
# Get new letter position by adding 1.
new_rotor_nbr = (position_nbr + 1) % 26
self.position = EnigmaMachine.number_to_letter(new_rotor_nbr)
def apply_ring_setting(self, letter):
"""
The ring setting (or Ringstellung) changes the internal wiring of
the rotor.
All letters in the mapping are shifted by the number corresponding
to the ring setting.
Example: rotor.apply_ring_setting('A') would have no effect on the
rotor (as A->0).
        rotor.apply_ring_setting('B') would perform a Caesar shift by 1 on
        each letter in rotor.mapping (so "EKMFLGDQVZNTOWYHXUSPAIBRCJ"
        becomes "FLNGMHERWAOUPXZIYVTQBJCSDK") before the positional shift
        is applied.
"""
shift = EnigmaMachine.letter_to_number(letter)
shifted_mapping = ''
for letter in self.mapping:
shifted_mapping = shifted_mapping + \
EnigmaMachine.caeser_shift(letter, shift)
# Adding positional shift due to ring settings.
shifted_mapping = [char for char in shifted_mapping]
shifted_mapping = \
[shifted_mapping[(i - shift) % 26] for i in range(26)]
self.mapping = "".join(shifted_mapping)
def map_letter(self, letter, reverse=False):
"""
Given a letter input, finds the letter it would be mapped to
through the rotor wiring. Takes into account the rotor position
as well as the underlying mapping. Can also do the reverse of this
for when the current passes back through the rotor.
Arguments:
letter (str): Letter to be mapped.
reverse (bool): Whether the current is going in reverse back
through the rotor. It does this after it goes
through the reflector.
Returns:
mapped_letter (str): Resulting letter after passing through
the rotor wiring.
Raises:
ValueError if input is not a string of length 1.
"""
        if type(letter) != str or len(letter) != 1:
            raise ValueError('Input must be a single character.')
letter = str.upper(letter)
# Add the transformation due to the rotor position.
initial = EnigmaMachine.letter_to_number(self.position)
if reverse:
letter = EnigmaMachine.caeser_shift(letter, initial)
mapped_letter = EnigmaMachine.number_to_letter((
self.mapping.index(letter) - initial) % 26)
else:
position = \
(initial + EnigmaMachine.letter_to_number(letter)) % 26
# Take into account the offset caused by the rotor being in
# a different position.
mapped_letter = \
EnigmaMachine.caeser_shift(self.mapping[position],
-initial)
return mapped_letter
class Reflector:
"""
A class to represent an Enigma Reflector.
The reflector pairs up two letters and is used in between the first and
second rotor passes.
The reflector enables the same Enigma machine to both encrypt and
decrypt messages without having to change any settings, however also
raises a key cryptographic weakness that the cryptoanalysts at
Bletchley Park were able to exploit.
Attributes:
reflector_mapping: (str)
A string of length 26 that says what each letter is mapped to.
A mapping must pair up letters (so if A -> Y, then Y-> A).
Can also be one of three standard reflectors (A, B or C).
"""
def __init__(self, reflector_mapping='A'):
"""
Initialises a Reflector from an EnigmaMachine.
Args:
reflector_mapping (str):
'A', 'B' or 'C' which correspond to the respective standard
reflectors used in the Enigma Machine.
                Can alternatively be a string of length 26 that says what
                each letter is mapped to.
A mapping must pair up letters (so if A -> Y, then
Y-> A).
Raises:
ValueError if mapping does not map letter pairs.
"""
if reflector_mapping.upper() == 'A':
self.reflector_mapping = 'EJMZALYXVBWFCRQUONTSPIKHGD'
elif reflector_mapping.upper() == 'B':
self.reflector_mapping = 'YRUHQSLDPXNGOKMIEBFZCWVJAT'
elif reflector_mapping.upper() == 'C':
self.reflector_mapping = 'FVPJIAOYEDRZXWGCTKUQSBNMHL'
else:
self.reflector_mapping = reflector_mapping
if type(self.reflector_mapping) != str or \
len(set(self.reflector_mapping)) != 26:
raise ValueError('Invalid reflector mapping')
for i in range(len(self.reflector_mapping)):
if EnigmaMachine.letter_to_number(self.reflector_mapping[i]) \
!= self.reflector_mapping.index(
EnigmaMachine.number_to_letter(i)):
raise ValueError('Reflector must have matching pairs, '
f'check letter '
f'"{self.reflector_mapping[i]}"')
def __str__(self):
"""
String representation of a Reflector used for printing to console.
"""
return (f'A reflector for an Enigma Machine with mapping'
f' "{self.reflector_mapping}".')
def map_letter(self, letter):
"""
Gives the corresponding letter pair of a reflector.
Arguments:
letter (str): Letter to be mapped.
Returns:
mapped_letter (str): Resulting letter after passing through
the reflector.
Raises:
ValueError if input is not a string of length 1.
"""
if type(letter) != str or len(letter) != 1:
raise ValueError('Input must be a single character.')
letter = str.upper(letter)
position = EnigmaMachine.letter_to_number(letter) % 26
mapped_letter = self.reflector_mapping[position]
return mapped_letter
class Plugboard:
"""
A class to represent an Enigma Plugboard.
The plugboard gives another layer of encryption to the Enigma Machine.
It connects (usually 10) pairs of letters and this pairing is done
at both the beginning and end of the encryption mechanism. The pairing
is called a "steckered pairing" (from the German word for plugboard
"Steckerbrett".)
Without the plugboard, it was feasible to break the Enigma code using
calculations by hand. With the introduction of the plugboard,
cryptoanalysts needed the use of the earliest computers to help crack
the code.
Attributes:
steckered_pairing: (str)
A string of pairs of letters delimited by a space that encodes
letter pairing.
"""
def __init__(self, steckered_pairing='AM FI NV PS TU WZ'):
"""
Initialises a Plugboard from an EnigmaMachine.
Args:
            steckered_pairing (str):
A string of pairs of letters delimited by a space that
encodes letter pairing.
Raises:
ValueError if mapping does not map letter pairs.
"""
self.steckered_pairing = steckered_pairing
        error = ('Steckered pairing must be unique pairs of letters '
                 'separated by a space.')
if type(steckered_pairing) != str:
raise ValueError(error)
steckered_pairing_no_spaces = steckered_pairing.replace(' ', '')
# Needs to only be letters.
if steckered_pairing != '':
if not steckered_pairing_no_spaces.isalpha() \
or len(steckered_pairing) % 3 != 2 \
or len(set(steckered_pairing_no_spaces)) != \
len(steckered_pairing_no_spaces):
raise ValueError(error)
# Needs to be pairs of letters.
for i in range(len(steckered_pairing)):
if i % 3 == 2 and steckered_pairing[i] != ' ':
raise ValueError(error)
elif i % 3 != 2 and not steckered_pairing[i].isalpha():
raise ValueError(error)
def __str__(self):
return (f'A plugboard for an Enigma Machine with steckered '
f'pairing "{self.steckered_pairing}".')
def map_letter(self, letter):
"""
Gives the corresponding letter pair of a plugboard.
Arguments:
letter (str): Letter to be mapped.
Returns:
mapped_letter (str): Resulting letter after passing through
the plugboard.
Raises:
ValueError if input is not a string of length 1.
"""
if type(letter) != str or len(letter) != 1:
            raise ValueError('Input must be a single character.')
letter = str.upper(letter)
pairing = self.steckered_pairing
if letter not in pairing:
mapped_letter = letter
else:
position = pairing.index(letter)
if pairing.index(letter) == len(pairing) - 1:
mapped_letter = pairing[position - 1]
elif pairing[position + 1] == ' ':
mapped_letter = pairing[position - 1]
else:
mapped_letter = pairing[position + 1]
return mapped_letter
if __name__ == '__main__':
print('Welcome to the Enigma Simulator!')
sleep(1)
    print('Here you can configure your very own Enigma Machine and use it to '
          'encrypt messages.')
sleep(1)
configure_enigma = input('Enter "N" to skip configuration and use default '
'settings or instead press enter to go ahead '
'and configure your machine. ')
if str.upper(configure_enigma) == 'N':
print('Default machine selected.')
EM = EnigmaMachine()
else:
print('Time to configure the machine!')
successful_input = False
while not successful_input:
num_rotor = input('Firstly, enter the number of rotors in your '
'machine: ')
        try:
            num_rotor = int(num_rotor)
            if num_rotor < 1:
                raise ValueError('number of rotors must be at least 1')
            successful_input = True
        except ValueError as err:
            print(f'Number must be a positive integer. {err}')
rotor_types = []
rotor_positions = ''
ring_settings = ''
for i in range(num_rotor):
print(f'Let\'s configure rotor {i+1}')
successful_input = False
while not successful_input:
rotor_type = input(
'Enter the rotor type (Roman numeral I - V) for rotor '
f'{i+1}: ')
rotor_position = str.upper(input(
f'Enter the rotor position (single letter A-Z) for rotor '
f'{i+1}: '))
ring_setting = str.upper(input(
'Enter the ring setting (single letter A-Z) for rotor '
f'{i+1}: '))
try:
EnigmaMachine.Rotor(rotor_type, rotor_position,
ring_setting)
successful_input = True
except ValueError as err:
print(f'Error in input for rotor {i+1}: {err}.\n'
'Let\'s try again.')
rotor_types.append(rotor_type)
rotor_positions = rotor_positions + rotor_position
ring_settings = ring_settings + ring_setting
successful_input = False
while not successful_input:
reflector_mapping = str.upper(input(
'Enter a reflector (A, B or C): '))
try:
EnigmaMachine.Reflector(reflector_mapping)
successful_input = True
except ValueError as err:
print(f'Error in input for reflector: {err} Let\'s try '
'again')
successful_input = False
while not successful_input:
steckered_pairing = str.upper(input(
'Finally, enter a steckered pairing (pairs of letters or N '
'for no plugboard): '))
try:
EnigmaMachine.Plugboard(steckered_pairing)
successful_input = True
        except ValueError as err:
            print(f'Error in input for plugboard: {err}. Let\'s try '
                  'again')
EM = EnigmaMachine(rotor_types=rotor_types,
rotor_positions=rotor_positions,
ring_settings=ring_settings,
steckered_pairing=steckered_pairing,
reflector_mapping=reflector_mapping)
print('Setting up machine...')
sleep(1)
print('Adjusting the rotors...')
sleep(1)
print('Pairing up the plugboard...')
sleep(1)
print('Your Enigma Machine is ready to use!')
sleep(1)
encryption = 'Y'
while encryption == 'Y':
message = input('Enter message to encrypt: ')
print('Encrypting message...')
sleep(1)
print(f'Encrypted message: {EM.encrypt_message(message)}')
encryption = str.upper(input(
'Would you like to encrypt another message with the same machine'
'? (Rotor positions will not be reset). (Y/N): '))
print('Thanks for using the Enigma Simulator!')
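# A minimal non-interactive usage sketch (settings below are illustrative,
# chosen only to show the constructor and encrypt_message API used above):
#   EM = EnigmaMachine(rotor_types=['I', 'II', 'III'],
#                      rotor_positions='ABC',
#                      ring_settings='AAA',
#                      steckered_pairing='AM FI NV PS TU WZ',
#                      reflector_mapping='B')
#   print(EM.encrypt_message('HELLOWORLD'))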
| StarcoderdataPython |
5194 | # -*- coding: utf-8 -*-
"""
Unit tests for the Person plugin and its model
"""
from django import forms
from django.conf import settings
from django.test import TestCase
from cms.api import add_plugin, create_page
from cmsplugin_plain_text.cms_plugins import PlaintextPlugin
from djangocms_picture.cms_plugins import PicturePlugin
from richie.apps.core.factories import FilerImageFactory, UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.persons.cms_plugins import PersonPlugin
from richie.apps.persons.factories import PersonFactory
from richie.apps.persons.models import PersonPluginModel
class PersonPluginTestCase(TestCase):
"""
Test that PersonPlugin correctly displays a Person's page placeholders content
"""
def test_cms_plugins_person_form_page_choices(self):
"""
The form to create a person plugin should only list person pages in the select box.
"""
class PersonPluginModelForm(forms.ModelForm):
"""A form for testing the choices in the select box"""
class Meta:
model = PersonPluginModel
exclude = ()
person = PersonFactory()
other_page_title = "other page"
create_page(other_page_title, "richie/fullwidth.html", settings.LANGUAGE_CODE)
plugin_form = PersonPluginModelForm()
self.assertIn(person.get_full_name(), plugin_form.as_table())
self.assertNotIn(other_page_title, plugin_form.as_table())
def test_cms_plugins_person_render(self):
"""
Test that a PersonPlugin correctly renders person's page specific information
"""
# Create a filer fake image
staff = UserFactory(is_staff=True, is_superuser=True)
image = FilerImageFactory(owner=staff)
# Create a Person
person = PersonFactory()
person_page = person.extended_object
# Add portrait to related placeholder
portrait_placeholder = person_page.placeholders.get(slot="portrait")
add_plugin(
portrait_placeholder,
PicturePlugin,
"en",
**{"picture": image, "attributes": {"alt": "portrait description"}}
)
add_plugin(
portrait_placeholder,
PicturePlugin,
"fr",
**{"picture": image, "attributes": {"alt": "description du portrait"}}
)
# A resume to related placeholder
resume_placeholder = person_page.placeholders.get(slot="resume")
add_plugin(
resume_placeholder, PlaintextPlugin, "en", **{"body": "A short resume"}
)
add_plugin(
resume_placeholder, PlaintextPlugin, "fr", **{"body": "Un résumé court"}
)
# Create a page to add the plugin to
page = create_i18n_page({"en": "A page", "fr": "Une page"})
placeholder = page.placeholders.get(slot="maincontent")
add_plugin(placeholder, PersonPlugin, "en", **{"person": person})
add_plugin(placeholder, PersonPlugin, "fr", **{"person": person})
page.publish("en")
page.publish("fr")
# Check the page content in English
url = page.get_absolute_url(language="en")
response = self.client.get(url)
# Person's name should be present as a link to the cms page
# And CMS page title should be in title attribute of the link
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
self.assertContains(response, person.get_full_name(), html=True)
# Person's portrait and its properties should be present
# pylint: disable=no-member
self.assertContains(response, image.file.name)
# Short resume should be present
self.assertContains(
response,
'<div class="person-plugin__content__text">A short resume</div>',
html=True,
)
# The person's full name should be wrapped in a h2
self.assertContains(
response,
'<h2 class="person-plugin__content__title">{:s}</h2>'.format(
person.get_full_name()
),
html=True,
)
# Same checks in French
url = page.get_absolute_url(language="fr")
response = self.client.get(url)
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
# pylint: disable=no-member
self.assertContains(response, image.file.name)
self.assertContains(
response,
'<div class="person-plugin__content__text">Un résumé court</div>',
html=True,
)
| StarcoderdataPython |
3361625 | """
Using pygments to render the code.
"""
from django.utils.translation import gettext as _
from pygments import highlight, styles
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles
from fluent_contents.plugins.code import appsettings
STYLE_CHOICES = [(x, x) for x in get_all_styles()]
STYLE_CHOICES.sort(key=lambda x: x[1].lower())
_languageChoices = [
(x[1][0], x[0]) for x in get_all_lexers() if x[1]
] # x = ('Title', ('name1', 'name2', 'nameN'), ('*.ext1', '*.ext2'), ('mimetype1',))
_languageChoices.sort(key=lambda x: x[1].lower())
LANGUAGE_CHOICES = tuple(t for t in _languageChoices if t[0] in appsettings.FLUENT_CODE_SHORTLIST)
if not appsettings.FLUENT_CODE_SHORTLIST_ONLY:
LANGUAGE_CHOICES += ((_("Combinations"), [t for t in _languageChoices if "+" in t[0]]),)
LANGUAGE_CHOICES += (
(
_("Advanced"),
[
t
for t in _languageChoices
if "+" not in t[0] and t[0] not in appsettings.FLUENT_CODE_SHORTLIST
],
),
)
def render_code(instance, style_name="default"):
# Some interesting options in the HtmlFormatter:
# - nowrap -> no wrap inside <pre>
# - classprefix -> prefix for the classnames
# - noclasses -> all inline styles.
#
# To get_style_defs(), you can pass a selector prefix.
#
style = styles.get_style_by_name(style_name)
formatter = HtmlFormatter(
linenos=instance.linenumbers,
style=style,
nowrap=True,
classprefix="code%s-" % instance.pk,
)
html = highlight(instance.code, get_lexer_by_name(instance.language), formatter)
css = formatter.get_style_defs()
# Included in a DIV, so the next item will be displayed below.
return (
'<div class="code"><style type="text/css">'
+ css
+ "</style>\n<pre>"
+ html
+ "</pre></div>\n"
)
# TODO: Make code rendering more efficient, have one style definition in the head of the page!
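# Usage sketch: render_code only relies on the instance attributes referenced
# above (.code, .language, .linenumbers, .pk), so any object carrying them
# will do:
#   from types import SimpleNamespace
#   snippet = SimpleNamespace(pk=1, language='python', linenumbers=False,
#                             code='print("hi")')
#   html = render_code(snippet, style_name='default')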
| StarcoderdataPython |
63869 | ####################################
# Example AI which moves based on
# the lowest bomb location
####################################
import math
from .ai_base import AI_Base
from src.misc.game_enums import Entity
class AI_Avoid_Bomb(AI_Base):
def __init__(self):
pass
def restart(self):
pass
def update(self, entity_list, cart):
cart_x = cart.get_center()[0]
entity_list_copy = entity_list.copy()
bombs_list = [x for x in entity_list_copy if (
x.type is Entity.BOMB) and (x.y < 600)]
if len(bombs_list) > 0:
lowest_bomb_x = bombs_list[0].x
else:
lowest_bomb_x = cart_x
delta = lowest_bomb_x - cart_x
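        # Move away from the lowest bomb while it is within 150 px
        # horizontally; otherwise stay put.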
if abs(delta) > 150:
ai_movement = 0
else:
if delta > 0:
ai_movement = -1
else:
ai_movement = 1
if len(bombs_list) == 0:
ai_movement = 0
return ai_movement
| StarcoderdataPython |
3258701 | <filename>ppf/core/controller.py
class controller(object):
def __init__(self, trade, model, env, historical_df = 0):
self.__trade = trade
self.__model = model
self.__env = env
self.__historical_df = historical_df
self.__symbol_table = {}
self.__event = None
def get_trade(self):
return self.__trade
def get_model(self):
return self.__model
def get_environment(self):
return self.__env
def get_event(self):
return self.__event
def set_event(self, event):
self.__event = event
def get_adjuvant_table(self):
leg = self.__trade.legs()[self.__event.leg_id()]
adjuvant_table = None
if leg.has_adjuvant_table():
adjuvant_table = leg.adjuvant_table()
return adjuvant_table
def insert_symbol(self, name, at):
self.__symbol_table[name] = (at, self.__model.state().create_variable())
def update_symbol(self, name, symbol, at):
self.__symbol_table[name] = (at, symbol)
def retrieve_symbol(self, name):
        if name not in self.__symbol_table:
            raise RuntimeError("name not found in symbol table")
return self.__symbol_table.get(name)[1]
def retrieve_symbol_update_time(self, name):
        if name not in self.__symbol_table:
            raise RuntimeError("name not found in symbol table")
return self.__symbol_table.get(name)[0]
def retrieve_symbols_to_rollback(self, at):
symbols = []
for symbol in self.__symbol_table:
pair = self.__symbol_table.get(symbol)
if pair[0] > at:
symbols.append(symbol)
return symbols
def pay_df(self, t, state):
if t < 0:
historical_df = self.__model.state().create_variable()
historical_df = self.__historical_df
return historical_df
else:
flow = self.__event.flow()
fill = self.__model.fill()
requestor = self.__model.requestor()
T = self.__env.relative_date(flow.pay_date())/365.0
return fill.numeraire_rebased_bond(t, T, flow.pay_currency(), self.__env, requestor, state)
def libor(self, t, state):
flow = self.__event.flow()
id = self.__event.reset_id()
obs = flow.observables()[id]
if t < 0:
fix = obs.fix()
if fix.is_fixed():
fixing = self.__model.state().create_variable()
fixing = fix.value()
return fixing
else:
                raise RuntimeError('libor in the past with no fixing')
else:
fill = self.__model.fill()
requestor = self.__model.requestor()
return fill.libor(t, obs, self.__env, requestor, state)
def swap(self, t, state):
        flow = self.__event.flow()
        id = self.__event.reset_id()
        obs = flow.observables()[id]
if t < 0:
fix = obs.fix()
if fix.is_fixed():
fixing = self.__model.state().create_variable()
fixing = fix.value()
return fixing
else:
                raise RuntimeError('swap in the past with no fixing')
else:
fill = self.__model.fill()
requestor = self.__model.requestor()
return fill.swap(t, obs, self.__env, requestor, state)
def rollback(self, T, t, symbol):
requestor = self.__model.requestor()
state = self.__model.state()
return self.__model.rollback().rollback(t, T, state, requestor, self.__env, symbol)
def rollback_max(self, T, t, symbol_one, symbol_two):
requestor = self.__model.requestor()
state = self.__model.state()
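        # Uses max(a, b) = a + max(b - a, 0): roll back symbol_one, then add
        # the rolled-back positive part of the difference.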
res1 = self.__model.rollback().rollback(t, T, state, requestor, self.__env, symbol_one)
res2 = self.__model.rollback().rollback_max(t, T, state, requestor, self.__env, symbol_two-symbol_one)
return res1+res2
def evolve(self, t, T):
requestor = self.__model.requestor()
state = self.__model.state()
self.__model.evolve().evolve(t, T, state, requestor, self.__env)
def numeraire(self, t):
if t < 0:
raise RuntimeError, "attempting to call 'numeraire' in the past"
fill = self.__model.fill()
requestor = self.__model.requestor()
state = self.__model.state().fill(t, requestor, self.__env)
return fill.numeraire(t, self.__event.pay_currency(), self.__env, requestor, state)
def explanatory_variables(self, t):
if t < 0:
raise RuntimeError, "attempting to call 'explanatory_variables' in the past"
fill = self.__model.fill()
requestor = self.__model.requestor()
state = self.__model.state().fill(t, requestor, self.__env)
exercise = self.__model.exercise()
return exercise(t, fill, state, requestor, self.__env)
def __call__(self, t):
leg = self.__trade.legs()[self.__event.leg_id()]
payoff = leg.payoff()
pay_rcv = leg.pay_receive()
return pay_rcv*payoff(t, self)
| StarcoderdataPython |
4836146 | <filename>jax/_src/lax/utils.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module contains utility functions split out of jax._src.lax.lax to
# avoid cyclic dependencies. Definitions that are used at import time by
# multiple modules can go here.
import builtins
from functools import partial
import operator
from typing import Callable
from jax import core
from jax._src import dtypes
from jax.interpreters import xla
from jax._src.util import safe_zip
from jax._src.lib import xla_client
xops = xla_client.ops
_max = builtins.max
# ### primitives
_input_dtype: Callable = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
def _argnum_weak_type(*argnums):
return lambda *args, **_: all(args[i].weak_type for i in argnums)
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None,
weak_type_rule=None, named_shape_rule=None):
weak_type_rule = weak_type_rule or _standard_weak_type_rule
named_shape_rule = named_shape_rule or standard_named_shape_rule
prim = core.Primitive(name)
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(
partial(standard_abstract_eval, prim, shape_rule, dtype_rule,
weak_type_rule, named_shape_rule))
xla.register_translation(
prim, translation_rule or partial(_standard_translate, name))
return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, weak_type_rule,
named_shape_rule, *avals, **kwargs):
assert all(isinstance(aval, core.UnshapedArray) for aval in avals), avals
assert not prim.multiple_results
weak_type = weak_type_rule(*avals, **kwargs)
least_specialized = _max(map(type, avals),
key=operator.attrgetter('array_abstraction_level'))
if least_specialized is core.ConcreteArray:
out = prim.impl(*[x.val for x in avals], **kwargs)
return core.ConcreteArray(out.dtype, out, weak_type=weak_type)
elif least_specialized is core.ShapedArray:
return core.ShapedArray(shape_rule(*avals, **kwargs),
dtype_rule(*avals, **kwargs), weak_type=weak_type,
named_shape=named_shape_rule(*avals, **kwargs))
elif least_specialized is core.UnshapedArray:
return core.UnshapedArray(dtype_rule(*avals, **kwargs), weak_type=weak_type)
else:
raise TypeError(avals, least_specialized)
def standard_multi_result_abstract_eval(
prim, shape_rule, dtype_rule, weak_type_rule,
named_shape_rule, *avals, **kwargs):
assert prim.multiple_results
assert all(isinstance(aval, core.UnshapedArray) for aval in avals), avals
least_specialized = _max(map(type, avals),
key=operator.attrgetter('array_abstraction_level'))
weak_types = weak_type_rule(*avals, **kwargs)
if least_specialized is core.ConcreteArray:
out_vals = prim.impl(*[x.val for x in avals], **kwargs)
return [core.ConcreteArray(val.dtype, val, weak_type=weak_type)
for val, weak_type in safe_zip(out_vals, weak_types)]
elif least_specialized is core.ShapedArray:
out_shapes = shape_rule(*avals, **kwargs)
out_dtypes = dtype_rule(*avals, **kwargs)
out_named_shapes = named_shape_rule(*avals, **kwargs)
return [core.ShapedArray(s, d, weak_type=weak_type, named_shape=named_shape)
for s, d, weak_type, named_shape
in safe_zip(out_shapes, out_dtypes, weak_types, out_named_shapes)]
elif least_specialized is core.UnshapedArray:
out_dtypes = dtype_rule(*avals, **kwargs)
return [core.UnshapedArray(dtype, weak_type=weak_type)
for dtype, weak_type in safe_zip(out_dtypes, weak_types)]
else:
raise TypeError(avals, least_specialized)
def _standard_translate(name, ctx, avals_in, avals_out, *args, **kwargs):
del ctx, avals_in, avals_out
xla_opname = ''.join(term.capitalize() for term in name.split('_'))
return [getattr(xops, xla_opname)(*args, **kwargs)]
def standard_named_shape_rule(*avals, **kwargs):
return core.join_named_shapes(*(a.named_shape for a in avals))
def _standard_weak_type_rule(*avals, **kwargs):
return all(aval.weak_type for aval in avals)
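# Sketch of wiring up a new primitive with these helpers (rules below are
# illustrative; a real translation_rule must map onto an existing XLA op):
#   _double_p = standard_primitive(
#       lambda x: x.shape,    # shape rule: output shape matches the input
#       _input_dtype,         # dtype rule: reuse the first operand's dtype
#       'double',
#       translation_rule=lambda ctx, avals_in, avals_out, x: [xops.Add(x, x)])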
| StarcoderdataPython |
135832 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 11:46:55 2013
@author: Craig
"""
# standard modules
import json
import logging
import pprint
import urllib
import urllib2
# site modules
# local modules
# CONSTANTS
# CKAN structure:
# A CKAN site has a number of datasets
# Each dataset is a collection of datafiles and datastores
# Datafiles are simply uploaded files of any format.
# A datastore is analogous to a database table.
# This module is mainly concerned with datastores.
#
# CKAN data types:
# http://docs.ckan.org/en/ckan-2.0.2/datastore-api.html#field-types
# The DataStore supports all types supported by PostgreSQL as well as
# a few additions.
# The json type has been added as a storage for nested data.
#
class CKANerror(Exception):
pass
class CKANaccess(object):
def __init__(self, base_url, ckan_key):
self.base_url = base_url
if base_url[-1] != '/': self.base_url += '/'
self.ckan_key = ckan_key
self.dataset_list = None
def _send_request(self, action, request_dict):
url = self.base_url + 'api/3/action/' + action
logging.info('ckan request: {}'.format(url))
logging.debug('ckan request dict\n{}'
.format(pprint.pformat(request_dict)))
request = urllib2.Request(url)
request.add_header('Authorization', self.ckan_key)
request_string = urllib.quote(json.dumps(request_dict))
response = urllib2.urlopen(request, request_string)
if response.code != 200:
raise CKANerror("Invalid response code {}".format(response.code))
# Load and verify CKAN's response.
response_dict = json.loads(response.read())
if response_dict['success'] is not True:
raise CKANerror("Request failed for {}\n{}"
.format(url, response_dict['error']))
logging.debug('ckan result:\n{}'
.format(pprint.pformat(response_dict['result'])))
return response_dict['result']
def get_dataset_list(self):
if self.dataset_list is None:
request_dict = {}
result = self._send_request('package_list', request_dict)
self.dataset_list = result
logging.debug('dataset_list\n{}'.format(
pprint.pformat(self.dataset_list)))
return self.dataset_list
def _clear_dataset_list(self):
# called when datasets created or destroyed
self.dataset_list = None
def dataset_exists(self, dataset_nm):
return dataset_nm in self.get_dataset_list()
def create_dataset(self, name, **kwargs):
if self.dataset_exists(name):
raise CKANerror("Cannot create: dataset '{}' exists".format(name))
request_dict = dict(kwargs)
request_dict['name'] = name
self._send_request('package_create', request_dict)
self._clear_dataset_list()
return self.get_dataset(name)
def get_dataset(self, name, create=False):
if not self.dataset_exists(name):
if not create:
raise CKANerror("Cannot get: dataset '{}' does not exist"
.format(name))
return self.create_dataset(name)
request_dict = {'id': name}
result = self._send_request('package_show', request_dict)
logging.debug('dataset dict\n{}'
.format(pprint.pformat(request_dict)))
return CKANdataset(self, name, result)
def delete_dataset(self, name):
if not self.dataset_exists(name):
raise CKANerror("Cannot delete: dataset '{}' does not exist"
.format(name))
request_dict = {'id': name}
self._send_request('package_delete', request_dict)
self._clear_dataset_list()
class CKANdataset(object):
def __init__(self, access, dataset_nm, dataset_dict):
self.access = access
self.dataset_nm = dataset_nm
self.dataset_id = dataset_dict['id']
self.resource_list = None
self.datastore_dict = None
self.alias_list = None
self._get_resource_list(dataset_dict)
def _get_resource_list(self, dataset_dict=None):
if self.resource_list is None:
if dataset_dict is None:
request_dict = {'id': self.dataset_id}
dataset_dict = self.access._send_request('package_show',
request_dict)
self.resource_list = [dsr['id']
for dsr in dataset_dict['resources']]
logging.debug('resource_list\n{}'.format(
pprint.pformat(self.resource_list)))
return self.resource_list
def _get_datastore_dict(self):
if self.datastore_dict is None:
rlist = self._get_resource_list()
request_dict = {'resource_id': '_table_metadata'}
response_dict = self.access._send_request('datastore_search',
request_dict)
self.alias_list = [rec['name']
for rec in response_dict['records']
if rec['alias_of']]
self.datastore_dict = dict([(rec['name'], rec['alias_of'])
for rec in response_dict['records']
if (rec['name'] in rlist or
rec['alias_of'] in rlist)])
logging.debug('datastore_dict\n{}'.format(
pprint.pformat(self.datastore_dict)))
return self.datastore_dict
def get_datastore_list(self):
return self._get_datastore_dict().keys()
def _clear_datastore_dict(self):
# called when datasets created or destroyed
self.datastore_dict = None
self.resource_list = None
self.alias_list = None
def datastore_exists(self, datastore_nm):
return datastore_nm in self._get_datastore_dict()
def alias_exists(self, datastore_nm):
self._get_datastore_dict() # update lists
return datastore_nm in self.alias_list
def _create_resource(self, name, **kwargs):
request_dict = dict(kwargs)
request_dict.update({
'package_id': self.dataset_id,
'url': 'http://skytruth.org',
'name': name,
})
resource = self.access._send_request('resource_create', request_dict)
return resource
def create_datastore(self, datastore_nm, **kwargs):
if self.datastore_exists(datastore_nm):
raise CKANerror("Cannot create; datastore '{}' exists"
.format(datastore_nm))
if self.alias_exists(datastore_nm):
raise CKANerror("Cannot create; alias '{}' already in use."
.format(datastore_nm))
# return a CKANdatastore object
# fields is a list of dicts [{'id':'fieldnm', 'type':'integer'}, ...]
resource = self._create_resource(datastore_nm)
request_dict = dict(kwargs)
request_dict.update({
'resource_id': resource['id'],
'aliases': datastore_nm,
})
datastore = self.access._send_request('datastore_create', request_dict)
logging.debug('datastore_create response:\n{}'
.format(pprint.pformat(datastore)))
# What is this datastore dictionary returned by the api call???
self._clear_datastore_dict()
# Now re-read the datastore_dict to get the resource_id
return self.get_datastore(datastore_nm)
def get_datastore(self, datastore_nm):
if not self.datastore_exists(datastore_nm):
raise CKANerror("Cannot get; datastore '{}' does not exist:"
.format(datastore_nm))
return CKANdatastore(self.access,
self,
datastore_nm,
self.datastore_dict[datastore_nm])
def delete_datastore(self, datastore_nm):
if not self.datastore_exists(datastore_nm):
raise CKANerror("Cannot delete: datastore '{}' does not exist"
.format(id))
dstore = self.get_datastore(datastore_nm)
request_dict = {'resource_id': dstore.datastore_id}
self.access._send_request('datastore_delete', request_dict)
self._clear_datastore_dict()
class CKANdatastore(object):
def __init__(self, access, dataset, datastore_nm, datastore_id):
self.access = access
self.dataset = dataset
self.datastore_nm = datastore_nm
self.datastore_id = datastore_id
def transfer_tsv_records(self, fields, reclines):
# format records for ckan
records = []
for recline in reclines:
#datavals = recline.split('\t')
datavals = [None if dv == 'None' else dv.strip()
for dv in recline.split('\t')]
assert len(fields) == len(datavals)
records.append(dict(zip(fields, datavals)))
request_dict = {
'resource_id': self.datastore_id,
'records': records,
#'method': 'insert',
}
self.access._send_request('datastore_upsert', request_dict)
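# Usage sketch (URL, key and field names are placeholders; every call issues
# a live CKAN API request):
#   access = CKANaccess('http://demo.ckan.org/', 'my-api-key')
#   dataset = access.get_dataset('my-dataset', create=True)
#   store = dataset.create_datastore(
#       'readings', fields=[{'id': 'ts', 'type': 'text'},
#                           {'id': 'value', 'type': 'float'}])
#   store.transfer_tsv_records(['ts', 'value'], ['2013-08-28\t1.5'])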
| StarcoderdataPython |
1638065 | # Copyright 2018 The Fragpy Developers. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Dictionary
covrad = {'H':0.37/0.52918,'C':0.77/0.52918,'O':0.73/0.52918,'N':0.75/0.52918}
vanrad = {'H':1.20/0.52918,'O':1.52/0.52918,'N':1.55/0.52918}
braggr={'H':0.25e0,'He':0.25e0, \
'Li':1.45e0,'Be':1.05e0,'B':0.85e0,'C':0.7e0,'N':0.65e0,'O':0.6e0,\
'F':0.5e0,'Ne':0.45e0,\
'Na':1.8e0,'Mg':1.5e0,'Al':1.25e0,'Si':1.1e0,'P':1e0,'S':1e0,\
'Cl':1e0,'Ar':1e0}
Nelectron = {'H':1,'C':6,'N':7,'O':8}
bondlen_m=0.165e1 # Covalent bond length
Ang = 0.52918
| StarcoderdataPython |
1609487 | <reponame>kuanpern/jupyterlab-snippets-multimenus
from sympy import symbols, exp, diff
x = symbols('x')
expr = exp(x**2)
deriv = diff(expr, x) | StarcoderdataPython |
1740164 | <reponame>moonfruit/yysite
# -*- coding: utf-8 -*-
import os
import pickle
from abc import ABCMeta, abstractmethod
class Cache(metaclass=ABCMeta):
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __setitem__(self, key, value):
self.set(key, value)
@abstractmethod
def get(self, key, default=None):
pass
@abstractmethod
def set(self, key, value, timeout=0):
pass
@abstractmethod
def get_stats(self):
pass
class DummyCache(Cache):
def get(self, key, default=None):
return default
def set(self, key, value, timeout=0):
pass
def get_stats(self):
return True
class FileCache(Cache):
def __init__(self, filename=".cache"):
self.filename = os.path.abspath(filename)
self.cache = {}
try:
with open(self.filename, 'rb') as stream:
self.cache.update(pickle.load(stream))
except FileNotFoundError:
pass
def get(self, key, default=None):
return self.cache.get(key, default)
def set(self, key, value, timeout=0):
self.cache[key] = value
with open(self.filename, "wb") as stream:
pickle.dump(self.cache, stream)
def get_stats(self):
return self.filename
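# Minimal usage sketch (cache file name is illustrative): every set() call
# re-pickles the whole dict, so values survive process restarts.
if __name__ == '__main__':
    cache = FileCache('.demo_cache')
    cache.set('answer', 42)
    assert cache['answer'] == 42
    print(cache.get_stats())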
| StarcoderdataPython |
66844 | <gh_stars>10-100
#!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import os, sys
sys.path.insert(0, os.getcwd())
from code.common import logging
import argparse
import json
from typing import List
# translate from LUT indices to chars
glob_results_are_indices = True # need to translate to ascii
int_2_labels = [ ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '\'', '*']
def __levenshtein(a: List, b: List) -> int:
"""Calculates the Levenshtein distance between a and b.
"""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def word_error_rate(hypotheses: List[str], references: List[str]) -> float:
"""
Computes Average Word Error rate between two texts represented as
corresponding lists of string. Hypotheses and references must have same length.
Args:
hypotheses: list of hypotheses
references: list of references
Returns:
(float) average word error rate
"""
scores = 0
words = 0
if len(hypotheses) != len(references):
raise ValueError("In word error rate calculation, hypotheses and reference"
" lists must have the same number of elements. But I got:"
"{0} and {1} correspondingly".format(len(hypotheses), len(references)))
for h, r in zip(hypotheses, references):
h_list = h.split()
r_list = r.split()
words += len(r_list)
scores += __levenshtein(h_list, r_list)
if words!=0:
wer = 1.0*scores/words
else:
wer = float('inf')
return wer, scores, words
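# Example (illustrative strings): deleting one word from a four-word
# reference yields one error over four reference words:
#   >>> word_error_rate(["the cat sat"], ["the cat sat down"])
#   (0.25, 1, 4)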
def parse_loadgen_log(acc_log):
    with open(acc_log, "r") as acc_json:
        acc_data = json.load(acc_json)
# read accuracy log json and create a dictionary of qsl_idx/data pairs
results_dict = {}
num_acc_log_duplicate_keys = 0
num_acc_log_data_mismatch = 0
sortedTranscripts = [None for i in range(len(acc_data))]
logging.info("Reading accuracy mode results...")
for sample in acc_data:
qsl_idx = sample["qsl_idx"]
data = sample["data"]
data = b''.fromhex(data)
if glob_results_are_indices:
data = "".join([ int_2_labels[idx] for idx in list(data) ])
else :
data = data.decode('ascii')
sortedTranscripts[qsl_idx] = data
return sortedTranscripts
def eval(args):
logging.info("Start RNN-T accuracy checking")
# Load ground truths
with open(args.val_manifest) as f:
manifest = json.load(f)
offender_set = {idx for idx,f in enumerate(manifest) if f['original_duration'] > args.max_duration}
ground_truths = [sample["transcript"] for idx, sample in enumerate(manifest) if idx not in offender_set]
logging.info("Finished loading the ground truths")
# Load predictions
predictions = parse_loadgen_log(args.loadgen_log)
logging.info("Finished loading the predictions")
    # Make sure predictions have the same number of samples as the ground truths.
assert len(ground_truths) == len(predictions), "Predictions and ground truths do not have same number of samples"
if(args.dump_output):
fp = open("predictions.txt", "w")
fg = open("ground_truth.txt", "w")
for p in predictions:
fp.write(p + "\n")
for g in ground_truths:
fg.write(g + "\n")
fp.close()
fg.close()
# Note that here we don't use logging.info and instead use print because we need the output to be in stdout to
# capture it in the testing harness.
# Compute WER (word error rate)
wer, _, _ = word_error_rate(predictions, ground_truths)
# Report accuracy as well (1-WER) for convenience for the harness
print("Word Error Rate: {:}%, accuracy={:}%".format(wer * 100, (1 - wer) * 100))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--loadgen_log", default="build/logs/rnnt_logs_accuracy.json")
parser.add_argument("--val_manifest", default="build/preprocessed_data/LibriSpeech/dev-clean-wav.json")
parser.add_argument("--max_duration", default=15.0)
parser.add_argument("--dump_output", default=False)
args = parser.parse_args()
eval(args)
if __name__ == "__main__":
main()
| StarcoderdataPython |
95703 | <reponame>mattbernst/ECCE
"""
This is a small wxPython GUI for showing PMF output.
"""
import glob
import math
import optparse
import os
import signal
import wx
class PmfPanel(wx.Panel):
"""This Panel holds a listbox containing PMF data display options.
Contains a list of pmf indices, a radio button set for picking the graph
type, and a checkbox specifying whether the graphs should be drawn separate
or superimposed.
"""
def __init__(self, parent, path, *args, **kwargs):
"""Create the PmfPanel."""
self.path = path
wx.Panel.__init__(self, parent, *args, **kwargs)
Sizer = wx.BoxSizer(wx.HORIZONTAL)
pmfListSizer = wx.BoxSizer(wx.VERTICAL)
pmfListSizer.Add(wx.StaticText(self,label="PMF Directive"), 0, wx.ALL, 5)
self.pmfListBox = wx.ListBox(self, style=wx.LB_ALWAYS_SB)
self.LoadPmfList()
pmfListSizer.Add(self.pmfListBox, 1, wx.ALL|wx.EXPAND, 5)
Sizer.Add(pmfListSizer, 1, wx.ALL|wx.EXPAND, 5)
fileListSizer = wx.BoxSizer(wx.VERTICAL)
fileListSizer.Add(wx.StaticText(self, label="PMF Frame"), 0, wx.ALL, 5)
self.fileListBox = wx.ListBox(self, style=wx.LB_ALWAYS_SB|wx.LB_MULTIPLE)
self.LoadFileList()
fileListSizer.Add(self.fileListBox, 1, wx.ALL|wx.EXPAND, 5)
button = wx.Button(self, label="Select All")
button.Bind(wx.EVT_BUTTON, self.OnSelectAllButton)
fileListSizer.Add(button, 0, wx.ALL|wx.EXPAND, 5)
Sizer.Add(fileListSizer, 1, wx.ALL|wx.EXPAND, 5)
optionsSizer = wx.BoxSizer(wx.VERTICAL)
optionsSizer.Add(wx.StaticText(self, label="Plot Options"), 0, wx.ALL, 5)
self.radiobox = wx.RadioBox(self, label="Plot Type:",
choices=["Energy vs Coordinate",
"Coordinate Distribution",
"Energy Distribution"],
style=wx.RA_SPECIFY_ROWS)
optionsSizer.Add(self.radiobox, 0, wx.ALL|wx.EXPAND, 5)
self.superImposeCheck = wx.CheckBox(self, label="Overlay Plots")
optionsSizer.Add(self.superImposeCheck, 0, wx.ALL|wx.EXPAND, 5)
button = wx.Button(self, label="Plot");
button.Bind(wx.EVT_BUTTON, self.OnGraphButton)
optionsSizer.Add(button, 0, wx.ALL|wx.ALIGN_CENTER, 5)
Sizer.Add(optionsSizer, 1, wx.ALL|wx.EXPAND, 5)
self.SetSizerAndFit(Sizer)
def OnGraphButton(self, event=None):
"""Create an input file for gnuplot and run it based on GUI selections."""
files = [self.fileListBox.GetClientData(n)
for n in self.fileListBox.GetSelections()]
if self.radiobox.GetSelection() == 0:
self.PlotHarmonic(files)
else:
self.BinFiles(files)
files = map(lambda x: x+".bin", files)
if self.radiobox.GetSelection() == 1:
self.PlotBinCol2(files)
if self.radiobox.GetSelection() == 2:
self.PlotBinCol3(files)
def OnSelectAllButton(self, event=None):
"""Select all PMF frames."""
for i in range(self.fileListBox.GetCount()):
self.fileListBox.Select(i)
def _Plot(self, files, plotCmd, plotFile):
"""Uses given plotCmd to write single or multiplot output to plotFile."""
if (self.superImposeCheck.IsChecked()):
plotFile.write("plot ")
for i in range(len(files)):
plotFile.write(plotCmd % (files[i],
files[i].split("-")[1].split(".")[0]))
if i < len(files) - 1:
plotFile.write(",\\\n")
else:
width,height = 1,1
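            # Grow the multiplot grid one cell at a time, widening or
            # deepening whichever keeps the width/height ratio nearest 1.25.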
for i in range(len(files)+1):
if width*height < i:
ratio = 1.25
if abs(((width+1)/height)-ratio) <= abs((width/(height+1))-ratio):
width+=1
else:
height+=1
plotFile.write("set size 1,1\n")
plotFile.write("set origin 0,0\n")
plotFile.write("set multiplot\n")
i = 0
sizeW, sizeH = 1.0/width, 1.0/height
for row in range(height):
for col in range(width):
plotFile.write("set size %f,%f\n" % (sizeW,sizeH))
plotFile.write("set origin %f,%f\n" % (sizeW*col,1.0-sizeH*(row+1)))
plotFile.write("plot " + plotCmd % (files[i],
files[i].split("-")[1].split(".")[0]) + "\n")
i+=1
if i >= len(files):
break;
if i >= len(files):
break;
plotFile.write("set nomultiplot\n")
plotFile.close()
self.RunGnuplot(plotFile.name)
def PlotHarmonic(self, files):
"""Plot column 2 vs column 3."""
pmfOpt = int(self.pmfListBox.GetStringSelection())
plotCmd = '"%%s" ' \
'using 2:($1 == %i ? $3 : 1/0) ' \
'every ::%d ' \
'title "frame %%s"' % (pmfOpt, self.pmfListBox.GetCount())
gnuplotHarmonicFileName = os.path.join(self.path, "pmf_harmonic.gnuplot")
plotFile = open(gnuplotHarmonicFileName, 'w')
plotFile.write('set terminal x11 title "Gnuplot ECCE MD PMF Property Plotter: PMF Option %i - Energy vs Coordinate"\n' % pmfOpt)
plotFile.write('set xlabel "Coordinate"\n')
plotFile.write('set ylabel "Energy"\n')
self._Plot(files, plotCmd, plotFile)
def PlotBinCol2(self, files):
"""Plot column 2 binned."""
pmfOpt = int(self.pmfListBox.GetStringSelection())
plotCmd = '"%%s" using 3:($1 == %i ? $2 == 2 ? $4 : 1/0 : 1/0) ' \
'title "frame %%s" with histeps' % pmfOpt
gnuplotBinFileName = os.path.join(self.path, "pmf_bin2.gnuplot")
plotFile = open(gnuplotBinFileName, 'w')
plotFile.write('set terminal x11 title "Gnuplot ECCE MD PMF Property Plotter: PMF Option %i - Coordinate Distribution"\n' % pmfOpt)
plotFile.write('set xlabel "Coordinate Bins"\n')
plotFile.write('set ylabel "Count"\n')
self._Plot(files, plotCmd, plotFile)
def PlotBinCol3(self, files):
"""Plot column 3 binned."""
pmfOpt = int(self.pmfListBox.GetStringSelection())
plotCmd = '"%%s" using 3:($1 == %i ? $2 == 3 ? $4 : 1/0 : 1/0) ' \
'title "frame %%s" with histeps' % pmfOpt
gnuplotBinFileName = os.path.join(self.path, "pmf_bin3.gnuplot")
plotFile = open(gnuplotBinFileName, 'w')
plotFile.write('set terminal x11 title "Gnuplot ECCE MD PMF Property Plotter: PMF Option %i - Energy Distribution"\n' % pmfOpt)
plotFile.write('set xlabel "Energy Bins"\n')
plotFile.write('set ylabel "Count"\n')
self._Plot(files, plotCmd, plotFile)
def RunGnuplot(self, filename):
"""Call gnuplot with the given file."""
cmd = "gnuplot -persist %s" % filename
os.system(cmd)
def LoadPmfList(self):
"""Read a random PMF file to initialize the PMF option listbox."""
self.pmfListBox.Clear()
pmfFiles = glob.glob(os.path.join(self.path, "*.pmf"))
if len(pmfFiles) <= 0: return #bail!
pmfNumbers = []
for line in open(pmfFiles[0]):
num = line.split()[0]
if num not in pmfNumbers:
pmfNumbers.append(num)
pmfNumbers.sort()
for num in pmfNumbers:
self.pmfListBox.Append(num)
self.pmfListBox.SetSelection(0)
def LoadFileList(self):
"""Determine *.pmf files and list their indices in the listbox."""
self.fileListBox.Clear()
files = glob.glob(os.path.join(self.path, "*.pmf"))
files.sort()
for file in files:
self.fileListBox.Select(
self.fileListBox.Append(file[:-4].split("-")[-1], file))
def BinFiles(self, files):
"""Preprocess *.pmf files for input into gnuplot as a distribution."""
for file in files:
outfile = file + ".bin"
if os.path.exists(outfile): continue
data = {}
file = open(file, 'r')
for opt in range(self.pmfListBox.GetCount()):
file.readline()
for line in file:
colVal = {}
pmfOption,colVal["col2"],colVal["col3"] = line.split()
for col in ["col2","col3"]: colVal[col] = float(colVal[col])
if pmfOption not in data:
data[pmfOption] = {}
for col in ["col2","col3"]:
data[pmfOption][col] = {"values":[colVal[col]],"bins":{}}
else:
for col in ["col2","col3"]:
data[pmfOption][col]["values"].append(colVal[col])
for pmfOption in data:
for col in ["col2","col3"]:
minVal = min(data[pmfOption][col]["values"])
maxVal = max(data[pmfOption][col]["values"])
for bin in frange(minVal,maxVal,(maxVal-minVal)/20):
data[pmfOption][col]["bins"][bin] = 0
for value in data[pmfOption][col]["values"]:
for bin in frange(minVal,maxVal,(maxVal-minVal)/20):
if value < bin:
data[pmfOption][col]["bins"][bin]+=1
break
outfile = open(outfile, 'w')
for pmfOption in data:
for col in ["col2","col3"]:
for bin in sorted(data[pmfOption][col]["bins"].keys()):
outfile.write("%s %s %f %i\n" %
(pmfOption, col[3], bin, data[pmfOption][col]["bins"][bin]))
class PmfFrame(wx.Frame):
"""Main Frame holding the PmfPanel."""
def __init__(self, path, *args, **kwargs):
"""Create the PmfFrame."""
wx.Frame.__init__(self, None, title="ECCE MD PMF Property Plotter")
# Add the PmfPanel
self.Panel = PmfPanel(self, path)
self.Fit()
signal.signal(signal.SIGTERM, self.handleSIGTERM)
def OnQuit(self, event=None):
"""Exit application."""
self.Close()
    def handleSIGTERM(self, signum, frame):
        self.OnQuit()
def frange(limit1, limit2=None, increment=1.):
"""Range function that accepts floats (and integers). numpy would be nice.
Usage:
frange(-2,2,0.1)
frange(10)
frange(10, increment=0.5)
The returned value is an iterator. Use list(frange(start,...)) for a list.
"""
if limit1 == limit2:
return (limit1 for n in xrange(1))
if limit2 is None:
limit2,limit1 = limit1,0.
else:
limit1 = float(limit1)
count = int(math.ceil((limit2-limit1)/increment))
return ((limit1 + n*increment) for n in xrange(count))
def sorted(sortable):
sortable.sort()
return sortable
if __name__ == '__main__':
app = wx.PySimpleApp()
(options, args) = optparse.OptionParser().parse_args()
frame = PmfFrame(args[0])
frame.Fit()
frame.Show(True)
if len(args) > 1:
calcviewerPipe = open(args[1], "w")
calcviewerPipe.write("#STARTED %s\n" % str(os.getpid()))
calcviewerPipe.flush()
app.MainLoop()
| StarcoderdataPython |
3215864 | <reponame>pulumi-bot/pulumi-azure-native<filename>sdk/python/pulumi_azure_native/cdn/v20200901/__init__.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .afd_custom_domain import *
from .afd_endpoint import *
from .afd_origin import *
from .afd_origin_group import *
from .custom_domain import *
from .endpoint import *
from .get_afd_custom_domain import *
from .get_afd_endpoint import *
from .get_afd_origin import *
from .get_afd_origin_group import *
from .get_custom_domain import *
from .get_endpoint import *
from .get_origin import *
from .get_origin_group import *
from .get_policy import *
from .get_profile import *
from .get_profile_supported_optimization_types import *
from .get_route import *
from .get_rule import *
from .get_rule_set import *
from .get_secret import *
from .get_security_policy import *
from .origin import *
from .origin_group import *
from .policy import *
from .profile import *
from .route import *
from .rule import *
from .rule_set import *
from .secret import *
from .security_policy import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:cdn/v20200901:AFDCustomDomain":
return AFDCustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDEndpoint":
return AFDEndpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOrigin":
return AFDOrigin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOriginGroup":
return AFDOriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:CustomDomain":
return CustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Endpoint":
return Endpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Origin":
return Origin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:OriginGroup":
return OriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Policy":
return Policy(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Profile":
return Profile(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Route":
return Route(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Rule":
return Rule(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:RuleSet":
return RuleSet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Secret":
return Secret(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:SecurityPolicy":
return SecurityPolicy(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "cdn/v20200901", _module_instance)
_register_module()
| StarcoderdataPython |
85793 | import math
cat1=float(input("length of the opposite leg: "))
cat2=float(input("length of the adjacent leg: "))
hip= math.pow(cat1,2)+math.pow(cat2,2)
print("hypotenuse: {:.2f}".format(math.sqrt(hip))) | StarcoderdataPython |
1614642 | # Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import tvb.simulator.lab as lab
from nest_elephant_tvb.Tvb.modify_tvb import Interface_co_simulation
import numpy as np
# reference simulation
np.random.seed(42)
model = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
connectivity = lab.connectivity.Connectivity().from_file()
connectivity.speed = np.array([4.0])
connectivity.configure()
coupling = lab.coupling.Linear(a=np.array(0.0154))
integrator = lab.integrators.HeunDeterministic(dt=0.1,bounded_state_variable_indices=np.array([0]),state_variable_boundaries=np.array([[0.0, 1.0]]))
monitors = lab.monitors.Raw(period=0.1, variables_of_interest=np.array(0,dtype=np.int))
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = lab.simulator.Simulator(model=model,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=(monitors,),
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim.configure()
result_all=sim.run(simulation_length=10.0)
# New simulator with proxy
np.random.seed(42)
model_1 = lab.models.ReducedWongWang(tau_s=np.random.rand(76))
monitors_1 = (Interface_co_simulation(period=0.1, id_proxy=np.array([0], dtype=np.int), time_synchronize=10.0))
# Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim_1 = lab.simulator.Simulator(model=model_1,
connectivity=connectivity,
coupling=coupling,
integrator=integrator,
monitors=(monitors,monitors_1,),
# initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1)
)
sim_1.configure()
result_1_all = [np.empty((0,)),np.empty((0,1,76,1))]
for j in range(5):
result_1_all_step = sim_1.run(
simulation_length=2.0,
proxy_data=[(2.0*j)+np.arange(0.1,2.1,0.1),
np.array([ result_all[0][1][(20*j)+i][0][0] for i in range(20) ]).reshape((20,1,1,1))])
result_1_all[0] = np.concatenate((result_1_all[0],result_1_all_step[0][0]))
result_1_all[1] = np.concatenate((result_1_all[1], result_1_all_step[0][1]))
for i in range(100):
diff = result_all[0][1][i][0][1:] - result_1_all[1][i,0,1:]
diff_2 = result_all[0][1][i][0][:1] - result_1_all[1][i,0,:1]
if np.sum(diff,where=np.logical_not(np.isnan(diff))) == 0.0 and np.sum(diff_2 ,where=np.logical_not(np.isnan(diff_2))) == 0.0:
print('test succeeds')
else:
print(np.sum(diff_2))
print('test FAIL')
| StarcoderdataPython |
3319649 | <filename>config/settings/local.py
import warnings
import sys
import logging
from .base import * # noqa
with warnings.catch_warnings(record=True) as warning:
environ.Env.read_env('.env')
for w in warning:
print(w.message)
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env("DJANGO_SECRET_KEY",
default='t69v7lq5ayk^k_)uyvjvpo(sljrcnbh)&$(rsqqjg-87160@^%')
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
INSTALLED_APPS += ('django_extensions',)
INTERNAL_IPS = ('127.0.0.1', )
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TESTS_IN_PROGRESS = False
if 'test' in sys.argv[1:] or 'jenkins' in sys.argv[1:]:
logging.disable(logging.CRITICAL)
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
DEBUG = False
TEMPLATE_DEBUG = False
TESTS_IN_PROGRESS = True
MIGRATION_MODULES = {app[app.rfind('.') + 1:]: None
for app in INSTALLED_APPS}
MANIFEST_URL = env('MANIFEST_URL', default=None)
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ('localhost:4200',)
DJOSER = {
'SITE_NAME': 'CCDB (test)',
'DOMAIN': 'localhost:4200',
'PASSWORD_RESET_CONFIRM_URL': 'password-reset?uid={uid}&token={token}',
}
| StarcoderdataPython |
125013 | """
@<NAME> (https://linktr.ee/pedrosantanaabreu)
@Icev (https://somosicev.com)
PT-BR (translated):
Write a program that reads the amount of money in Brazilian reais that a
person who is going to travel has. The person will pass through several
countries and needs to convert the money into dollars, euros and pounds
sterling. The exchange rates are R$ 4.25 to the dollar, R$ 4.75 to the euro
and R$ 5.64 to the pound sterling. The program must perform the conversions
and display them.
"""
# Exchange rates
euro = 4.75
dolar = 4.25
libra_esterlina = 5.64
# Reading the amount in reais
valor_reais = float(input('Enter the amount in reais | R$ '))
# Conversions
real_euro = valor_reais / euro
real_dolar = valor_reais / dolar
real_libra_esterlina = valor_reais / libra_esterlina
# Displaying the results
print('''
Amount in reais | R$ {:.2f}\n
Euro | € {:.2f}
Dollar | $ {:.2f}
Pound sterling | £ {:.2f}
'''.format(valor_reais, real_euro, real_dolar, real_libra_esterlina))
| StarcoderdataPython |
3320330 | # -*- coding: utf-8 -*-
# @Time : 20-4-17 上午9:55
# @File : myihome.py
from handlers.basehandler import BaseHandler
class MyIhomeHandler(BaseHandler):
def post(self, *args, **kwargs):
pass | StarcoderdataPython |
67627 | """Couple of MPyC oneliners.
Run with m parties to compute:
- m = sum_{i=0}^{m-1} 1 = sum(1 for i in range(m))
- m**2 = sum_{i=0}^{m-1} 2i+1 = sum(2*i+1 for i in range(m))
- 2**m = prod_{i=0}^{m-1} 2 = prod(2 for i in range(m))
- m! = prod_{i=0}^{m-1} i+1 = prod(i+1 for i in range(m))
Bit lengths of secure integers ensure each result fits for any m, 1<=m<=256.
"""
from mpyc.runtime import mpc
mpc.run(mpc.start())
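# The SecInt bit lengths below are sized for up to m = 256 parties: for
# example 2**256 needs 257 bits, and 256! needs about 1684 bits (hence 1685).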
print('m =', mpc.run(mpc.output(mpc.sum(mpc.input(mpc.SecInt(9)(1))))))
print('m**2 =', mpc.run(mpc.output(mpc.sum(mpc.input(mpc.SecInt(17)(2*mpc.pid+1))))))
print('2**m =', mpc.run(mpc.output(mpc.prod(mpc.input(mpc.SecInt(257)(2))))))
print('m! =', mpc.run(mpc.output(mpc.prod(mpc.input(mpc.SecInt(1685)(mpc.pid+1))))))
mpc.run(mpc.shutdown())
| StarcoderdataPython |
1744253 | <filename>historian/historian.py
#!/usr/bin/env python3
from inotify import constants
from inotify.adapters import Inotify
from pyln.client import Plugin
from sqlalchemy import create_engine
from sqlalchemy import desc
from sqlalchemy.orm import sessionmaker
from threading import Thread
from common import Base, ChannelAnnouncement, ChannelUpdate, NodeAnnouncement
import logging
import gossipd
import struct
import time
# Any message that is larger than this threshold will not be processed
# as it bloats the database.
MAX_MSG_SIZE = 1024
plugin = Plugin()
class FsMonitor(Thread):
    def __init__(self):
        Thread.__init__(self)
def run(self):
watch_mask = constants.IN_ALL_EVENTS
print("Starting FsMonitor")
i = Inotify()
i.add_watch('gossip_store', mask=watch_mask)
for event in i.event_gen(yield_nones=False):
(e, type_names, path, filename) = event
if e.mask & constants.IN_DELETE_SELF:
i.remove_watch('gossip_store')
i.add_watch('gossip_store', mask=watch_mask)
class FileTailer():
def __init__(self, filename):
self.filename = filename
self.pos = 1
self.version = None
def resume(self):
ev_count = 0
with open(self.filename, 'rb') as f:
self.version, = struct.unpack("!B", f.read(1))
f.seek(self.pos)
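            # Each gossip_store record is a "!II" header (a length word whose
            # top bits are delete/important flags, plus a checksum) followed
            # by the raw message payload; `pos` tracks the byte offset.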
while True:
skip = False
diff = 8
hdr = f.read(8)
if len(hdr) < 8:
break
length, crc = struct.unpack("!II", hdr)
if self.version > 3:
f.read(4) # Throw away the CRC
diff += 4
# deleted = (length & 0x80000000 != 0)
# important = (length & 0x40000000 != 0)
length = length & (~0x80000000) & (~0x40000000)
msg = f.read(length)
# Incomplete write, will try again
if len(msg) < length:
logging.debug(
f"Partial read: {len(msg)}<{length}, waiting 1 second"
)
time.sleep(1)
f.seek(self.pos)
continue
diff += length
# Strip eventual wrappers:
typ, = struct.unpack("!H", msg[:2])
if self.version <= 3 and typ in [4096, 4097, 4098]:
msg = msg[4:]
self.pos += diff
if typ in [4101, 3503]:
f.seek(self.pos)
continue
if length > MAX_MSG_SIZE:
                    logging.warning(
f"Unreasonably large message type {typ} at position {self.pos} ({length} bytes), skipping"
)
continue
ev_count += 1
yield msg
logging.debug(
f"Reached end of {self.filename} at {self.pos} after {ev_count} "
"new messages, waiting for new fs event"
)
def wait_actionable(self, i):
for event in i.event_gen(yield_nones=False):
if event[0].mask & constants.IN_DELETE_SELF:
return 'swap'
if event[0].mask & constants.IN_MODIFY:
return 'append'
def tail(self):
watch_mask = (constants.IN_ALL_EVENTS ^ constants.IN_ACCESS ^
constants.IN_OPEN ^ constants.IN_CLOSE_NOWRITE)
i = Inotify()
i.add_watch(self.filename, mask=watch_mask)
while True:
# Consume as much as possible.
yield from self.resume()
# Now wait for a change that we can react to
ev = self.wait_actionable(i)
if ev == 'append':
continue
if ev == 'swap':
# Need to reach around since file-deletion removes C watches,
# but not the python one...
try:
i.remove_watch(self.filename)
except Exception:
pass
i.add_watch(self.filename, mask=watch_mask)
self.pos = 1
continue
class Flusher(Thread):
def __init__(self, engine):
Thread.__init__(self)
self.engine = engine
self.session_maker = sessionmaker(bind=engine)
self.session = None
def run(self):
logging.info("Starting flusher")
ft = FileTailer('gossip_store')
last_flush = time.time()
self.session = self.session_maker()
for i, e in enumerate(ft.tail()):
self.store(e)
if last_flush < time.time() - 10:
self.session.commit()
self.session = self.session_maker()
last_flush = time.time()
logging.warn("Filetailer exited...")
def store(self, raw: bytes) -> None:
try:
msg = gossipd.parse(raw)
cls = None
if isinstance(msg, gossipd.ChannelUpdate):
cls = ChannelUpdate
elif isinstance(msg, gossipd.ChannelAnnouncement):
cls = ChannelAnnouncement
elif isinstance(msg, gossipd.NodeAnnouncement):
cls = NodeAnnouncement
else:
                return
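            # merge() is an upsert keyed on the primary key, so re-processing
            # the same gossip_store entries stays idempotent.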
self.session.merge(cls.from_gossip(msg, raw))
except Exception as e:
logging.warn(f"Exception parsing gossip message: {e}")
@plugin.init()
def init(plugin, configuration, options):
print(options)
engine = create_engine(options['historian-dsn'], echo=False)
Base.metadata.create_all(engine)
plugin.engine = engine
Flusher(engine).start()
@plugin.method('historian-stats')
def stats(plugin):
engine = plugin.engine
session_maker = sessionmaker(bind=engine)
session = session_maker()
return {
'channel_announcements': session.query(ChannelAnnouncement).count(),
'channel_updates': session.query(ChannelUpdate).count(),
'node_announcements': session.query(NodeAnnouncement).count(),
'latest_node_announcement': session.query(NodeAnnouncement).order_by(desc(NodeAnnouncement.timestamp)).limit(1).first(),
'latest_channel_update': session.query(ChannelUpdate).order_by(desc(ChannelUpdate.timestamp)).limit(1).first(),
}
plugin.add_option(
'historian-dsn',
'sqlite:///historian.sqlite3',
"SQL DSN defining where the gossip data should be stored."
)
if __name__ == "__main__":
plugin.run()
| StarcoderdataPython |
40855 | # Generated by Django 3.0.4 on 2022-03-02 19:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('page_edits', '0014_delete_whatsappnumber'),
]
operations = [
migrations.DeleteModel(
name='HowWeWorkText',
),
]
| StarcoderdataPython |
114144 | <gh_stars>0
# This Python file uses the following encoding: utf-8
import sys
import os
import redis
import re
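# Usage: python ip_filter.py <logfile> <blacklist-file-with-one-IP-per-line>
# Prints a warning for every log line containing a blacklisted IP (stored in Redis).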
class IPFilter:
def __init__(self, logFile, IPFile):
self.logFile = logFile
self.IPFile = IPFile
self.r = redis.Redis(host='localhost', port=6379, db=0)
def checkIP(self):
f = open(self.IPFile, "r")
for line in f:
            self.r.setnx(line.strip(), "")
f.close()
f = open(self.logFile, "r")
for line in f:
IPs = re.findall(r'[0-9]+(?:\.[0-9]+){3}', line)
for IP in IPs:
                if self.r.exists(IP):
print("warning: " + line)
break
f.close()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Not enough arguments passed")
sys.exit()
if os.path.isfile(sys.argv[1]) and os.path.isfile(sys.argv[2]):
filter = IPFilter(sys.argv[1], sys.argv[2])
filter.checkIP()
del filter
else:
print("Files don't exist")
| StarcoderdataPython |
105215 | <reponame>limchr/ALeFra
#!/usr/bin/env python
#
# Copyright (C) 2018
# <NAME>
# Centre of Excellence Cognitive Interaction Technology (CITEC)
# Bielefeld University
#
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import os
# import the main functionality of ALeFra
from ALeFra.active_classifier import active_classifier
# import some predefined querying strategies
from ALeFra.active_strategies import *
import matplotlib.pyplot as plt
from common.data_handler.load_outdoor import load_outdoor
from machine_learning_models.glvq import glvq
num_batches = 300
batch_size = 3
def load_toy_4(n,overlap):
"""creates a toy data set with 4 classes and a specific overlap"""
cl1_x = np.array([np.random.random(n), np.random.random(n)]).T
cl1_y = np.array([0] * n)
cl2_x = np.array([np.random.random(n)+(1-overlap), np.random.random(n)]).T
cl2_y = np.array([1] * n)
cl3_x = np.array([np.random.random(n), np.random.random(n)+(1-overlap)]).T
cl3_y = np.array([2] * n)
cl4_x = np.array([np.random.random(n)+(1-overlap), np.random.random(n)+(1-overlap)]).T
cl4_y = np.array([3] * n)
x = np.vstack((cl1_x, cl2_x,cl3_x,cl4_x))
y = np.hstack((cl1_y,cl2_y,cl3_y,cl4_y))
return (x,y)
# load data set
x, y = load_outdoor(split_approaches=False,return_images=False)
# doing a train/test split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
#train normal SVM and calc score on test set
a = SVC(kernel='linear')
a.fit(x_train,y_train)
print('normal svm score: ',a.score(x_test,y_test))
# export dir for saving plots
EXPORT_DIR = 'compare_queryings_outdoor_big'
# to do uncertainty sampling we need certainty information from the SVM (SVC scikit-learn classifier)
svm_kwargs = {'kernel':'linear', 'probability':True}
glvq_kwargs = {'max_prototypes_per_class': 9999999999, 'learning_rate': 50, 'strech_factor': 1}
cls = SVC
cls_kwargs = svm_kwargs
strats = [strategy_query_random,strategy_query_least_confident,strategy_query_by_committee,strategy_query_highest_entropy,strategy_query_best_vs_second_best]
# strats = [strategy_query_best_vs_second_best]
alefra_kwargs = {'classifier': cls, 'score_method':None, 'x': x_train, 'y': y_train, 'incremental_trainable': False, 'classifier_kwargs': cls_kwargs}
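# Derive readable strategy names from the function names
# ("strategy_query_random" -> "query random").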
strat_names = [' '.join(s.__str__().split(' ')[1].split('_')[1:]) for s in strats]
active_cls = []
for i in range(len(strats)):
ac = active_classifier(**alefra_kwargs)
# set strategy for querying
ac.set_query_strategy(strats[i])
#set test set
ac.init_log(os.path.join(EXPORT_DIR,strat_names[i]),visualization_method=None,x_test = x_test,y_test = y_test,img_test = None)
active_cls.append(ac)
#active querying and testing/logging
for epoch in range(num_batches):
print(epoch)
for i in range(len(strats)):
active_cls[i].fit_active(batch_size) # query batch of size and train it
active_cls[i].evaluate() # evaluate test and train set
#active_cls_rand.visualize_by_embedding() # visualize training progress using a t-SNE embedding (saved in export dir)
for i in range(len(strats)):
# save a plot of scores while training
active_cls[i].save_scores_plot()
# alternatively a score plot can be calculated manually
#train_scores, train_scores_labeled, train_scores_unlabeled = active_cls.get_train_scores()
test_scores = [c.get_test_scores() for c in active_cls]
plt.clf()
plt.ion()
for i in range(len(active_cls)):
plt.plot(test_scores[i],label=strat_names[i])
plt.legend()
plt.xlabel('batch')
plt.ylabel('accuracy')
plt.ioff()
plt.show()
| StarcoderdataPython |
3232956 | #
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import os, re, sys
def main():
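    # Expected arguments: <output_header.h> followed by one or more
    # space-separated "input_file array_name" pairs.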
out_file_path = sys.argv[1]
out_file_name = out_file_path.split('/')[-1].upper().split('.')[0]
f = open(out_file_path, 'wt')
f.write('#ifndef _%s_H_\n' % (out_file_name))
f.write('#define _%s_H_\n\n' % (out_file_name))
for arg in sys.argv[2:]:
in_file_path = arg.split()[0]
array_name = arg.split()[1]
data = open(in_file_path, 'rb').read()
f.write('unsigned char %s[] =\n' % (array_name))
f.write('{\n')
i = 0
for byte in data:
if i == 0:
f.write(' ')
f.write('0x%02x, ' % byte)
i += 1
if i == 16:
f.write('\n')
i = 0
if 0 != i:
f.write('\n};\n\n')
else:
f.write('};\n\n')
f.write('#endif //!_%s_H_\n' % (out_file_name))
f.close()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3359195 | <filename>tests/classifiers/test_coding_dna_substitution.py
"""Module for testing Coding DNA Substitution Classifier."""
import unittest
from variation.classifiers import CodingDNASubstitutionClassifier
from .classifier_base import ClassifierBase
class TestCodingDNASubstitutionClassifier(ClassifierBase, unittest.TestCase):
"""A class to test the Coding DNA Substitution Classifier."""
def classifier_instance(self):
"""Return CodingDNASubstitutionClassifier instance."""
return CodingDNASubstitutionClassifier()
def fixture_name(self):
"""Return CodingDNASubstitutionClassifier fixture name."""
return 'coding_dna_substitution'
| StarcoderdataPython |
3316693 | <filename>tests/test_data_quality.py
# from dwetl import dw_etl
import datetime
import csv
import unittest
from dwetl import data_quality_utilities
'''
data_quality_utilities.py tests
'''
class TestDataQualityUtilities(unittest.TestCase):
#test if right exceptions are thrown when given bad data
#def test_is_numeric(self):
def test_is_valid_length(self):
#Test that matching len passes for stringified int
self.assertTrue(data_quality_utilities.is_valid_length(5634563, 7))
#test that too large len fails for stringified int
self.assertFalse(data_quality_utilities.is_valid_length(5634563, 8))
#Test that matching len passes
self.assertTrue(data_quality_utilities.is_valid_length("mustard", 7))
#Test that too small len fails
self.assertFalse(data_quality_utilities.is_valid_length("mustard", 2))
#Test that too large len fails
self.assertFalse(data_quality_utilities.is_valid_length("mustard", 20))
def test_is_less_than_eq_to_length(self):
#Test that equal for string works
self.assertTrue(data_quality_utilities.is_less_than_eq_to_length("mustard", 7))
#Test that "less than" for string works
self.assertTrue(data_quality_utilities.is_less_than_eq_to_length("must", 7))
#Test that equal for stringified len works
self.assertTrue(data_quality_utilities.is_less_than_eq_to_length(1234567, 7))
#Test that "less than" for stringified len works
self.assertTrue(data_quality_utilities.is_less_than_eq_to_length(2, 7))
#Test that non-matching len fails for string
self.assertFalse(data_quality_utilities.is_less_than_eq_to_length("mustard", 2))
#Test that non-matching len fails for stringified int
self.assertFalse(data_quality_utilities.is_less_than_eq_to_length(1234587, 2))
def test_no_missing_values(self):
# test None
self.assertFalse(data_quality_utilities.no_missing_values(None))
# test ''
self.assertFalse(data_quality_utilities.no_missing_values(''))
self.assertFalse(data_quality_utilities.no_missing_values(' '))
self.assertFalse(data_quality_utilities.no_missing_values(' '))
self.assertTrue(data_quality_utilities.no_missing_values('123'))
self.assertFalse(data_quality_utilities.no_missing_values('0'))
def test_trim(self):
#Test leading and trailing space trimmed
self.assertEqual(data_quality_utilities.trim(" mustard is cool "), "mustard is cool")
#Test that trailing space trimmed
self.assertEqual(data_quality_utilities.trim("BL-BL "), "BL-BL")
#Test that a not trimmed value is not returned
self.assertNotEqual(data_quality_utilities.trim(" I can Haz Cheezburgr"), " I can Haz Cheezburgr")
def test_is_valid_aleph_year(self):
#Test that year in range passes function
self.assertTrue(data_quality_utilities.is_valid_aleph_year(1999))
#Test that current year passes function
self.assertTrue(data_quality_utilities.is_valid_aleph_year(datetime.datetime.now().year))
#Test that year outside range fails
self.assertFalse(data_quality_utilities.is_valid_aleph_year(1899))
#Test that string fails year check
self.assertFalse(data_quality_utilities.is_valid_aleph_year("mustard"))
# def test_is_valid_aleph_date():
# def test_valid_aleph_date_redux(self):
# #Test that function run on stringified date for today returns True
# self.assertTrue(data_quality_utilities.is_valid_aleph_date_redux(datetime.datetime.now().strftime('%Y%m%d')))
#
# #Test that invalid dates fail
# self.assertFalse(data_quality_utilities.is_valid_aleph_date_redux("02180101"))
#
# # #Test int date
# # self.assertFalse(data_quality_utilities.is_valid_aleph_date_redux(20190101))
#
# #Test impossible date
# with self.assertRaises(ValueError):
# data_quality_utilities.is_valid_aleph_date_redux("20190132")
# # This fails but throws a value error, should we plan to handle? Unittest handles with assertRaises, should function act accordingly?
#
# #Test impossible date2
# with self.assertRaises(ValueError):
# data_quality_utilities.is_valid_aleph_date_redux("20190229")
# with self.assertRaises(TypeError):
# data_quality_utilities.is_valid_aleph_date_redux(20180101)
# self.assertFalse(data_quality_utilities.is_valid_aleph_date_redux((datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y%m%d')))
def test_csv_to_dict(self):
#Test that output is dict
self.assertIsInstance(data_quality_utilities.create_dict_from_csv_redux("lookup_tables/call_no_type.csv"), dict)
#Test that output is not int
self.assertNotIsInstance(data_quality_utilities.create_dict_from_csv_redux("lookup_tables/call_no_type.csv"), int)
#Test that output is not list
self.assertNotIsInstance(data_quality_utilities.create_dict_from_csv_redux("lookup_tables/call_no_type.csv"), list)
#Test that data count in dict matches data count in csv
with open("lookup_tables/call_no_type.csv", "r") as file:
csv_reader_object = csv.reader(file)
row_count = sum(1 for row in csv_reader_object)
self.assertEqual(len(data_quality_utilities.create_dict_from_csv_redux("lookup_tables/call_no_type.csv")), row_count)
def test_is_valid_hour(self):
#Tests based on sample data from Z35_EVENT_HOUR
#Test that single character time stamp passes
self.assertTrue(data_quality_utilities.is_valid_hour(5))
#Test that 2-character time stamp passes
self.assertTrue(data_quality_utilities.is_valid_hour(15))
#Test that 3-character time stamp passes
self.assertTrue(data_quality_utilities.is_valid_hour(730))
#Test that 4-character time stamp passes
self.assertTrue(data_quality_utilities.is_valid_hour(2050))
#Test that out-of-range time stamp fails
self.assertFalse(data_quality_utilities.is_valid_hour(2515))
# def test_output_standard(self):
# b = TransformField('')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3208738 | <gh_stars>0
import numpy as np
import json
import os.path
import matplotlib.pyplot as plt
from os.path import join as os_join
from os.path import sep as os_sep
from source.utilities import statistics_utilities as stats_utils
caarc_freqs = [0.231, 0.429, 0.536]
# VALUES COPIED WITH FULL MATRICIES CALCULATED
eigenmodes_target_2D_3_elems = {'y':[np.array([0. , 0.0040305 , 0.013317 , 0.02434817]),
np.array([ 0. , -0.01444802, -0.01037197, 0.02449357]),
np.array([ 0. , -0.01819986, 0.01604842, -0.02446053])],
'g':[np.array([ 0. , -0.0004894 , -0.00070823, -0.00074479]),
np.array([ 0. , 0.00095991, -0.00161083, -0.00260447]),
np.array([ 0. , -0.0009034 , -0.00068957, 0.00432069])]}
eigenmodes_target_2D_10_elems ={'y':[np.array([0.0, -0.000223647046255273, -0.0008516138734941783, -0.0018197756020471587, -0.0030651302400258222, -0.00452698257707041, -0.006148471228742031, -0.007878364112695452, -0.009673052432583382, -0.011498680245678633, -0.01333335614230312]),
np.array([ 0. , 0.00123514, 0.00401433, 0.00701557, 0.00911353, 0.00951618, 0.0078602 , 0.00422764, -0.00093387, -0.00698381, -0.01333421]),
np.array([ 0. , 0.00304246, 0.00806418, 0.01008837, 0.007016 , 0.00026276, -0.00632 , -0.00877015, -0.00526778, 0.00304816, 0.01334002])],
'g':[np.array([0.00000000e+00, 2.91028048e-05, 5.39127464e-05, 7.44741342e-05,9.08970510e-05, 1.03382795e-04, 1.12244221e-04, 1.17921246e-04,1.20991889e-04, 1.22179408e-04, 1.22356253e-04]),
np.array([ 0.00000000e+00, -1.49126242e-04, -2.06595585e-04, -1.80905410e-04,-8.99100753e-05, 4.02818206e-05, 1.79513859e-04, 2.99658399e-04,3.81148479e-04, 4.18652321e-04, 4.24986410e-04]),
np.array([ 0.00000000e+00, -3.34883542e-04, -2.77308592e-04, 3.15745412e-05,3.61060122e-04, 4.93762038e-04, 3.37172087e-04, -3.17237701e-05,-4.21134643e-04, -6.52640493e-04, -6.98021127e-04])]}
def evaluate_residual(a_cur, a_tar):
residual = np.linalg.norm(np.subtract(a_cur, a_tar)) / np.amax(np.absolute(a_tar))
# print ('current: ', a_cur)
# print ('target: ', a_tar)
# print('residual:', residual)
# print()
return residual
def cm2inch(value):
return value/2.54
def increasing_by(val_old, val_new):
'''
returns the increase in % from the origin = old
'''
increase = (val_new - val_old)/val_old * 100
return round(increase,2)
def check_and_flip_sign_dict(eigenmodes_dict):
'''
    flips the sign of the y and a deformations of each mode so that they are positive at the first node above ground;
    dependent/coupled dofs are flipped accordingly
'''
for idx in range(len(eigenmodes_dict['y'])):
if eigenmodes_dict['y'][idx][1] < 0:
eigenmodes_dict['y'][idx] = np.negative(eigenmodes_dict['y'][idx])
try:
eigenmodes_dict['g'][idx] = np.negative(eigenmodes_dict['g'][idx])
except KeyError:
pass
try:
if eigenmodes_dict['a'][idx][1] < 0:
eigenmodes_dict['a'][idx] = np.negative(eigenmodes_dict['a'][idx])
except KeyError:
pass
return eigenmodes_dict
def check_and_flip_sign_array(mode_shape_array):
'''
check_and_change_sign
change the sign of the mode shape such that the first entry is positive
'''
    if mode_shape_array[1] < 0:
        return mode_shape_array * -1
    return mode_shape_array
def analytic_function_static_disp(parameters, x, load_type = 'single'):
l = parameters['lx_total_beam']
EI = parameters['E_Modul'] * parameters['Iy']
magnitude = parameters['static_load_magnitude']
if load_type == 'single':
#print (' w_max soll:', magnitude*l**3/(3*EI))
return (magnitude/EI) * (0.5 * l * (x**2) - (x**3)/6)
elif load_type == 'continous':
#print (' w_max soll:', magnitude*l**4/(8*EI))
return -(magnitude/EI) * (-x**4/24 + l * x**3 /6 - l**2 * x**2 /4)
def analytic_eigenfrequencies(beam_model):
# von https://me-lrt.de/eigenfrequenzen-eigenformen-beim-balken
parameters = beam_model.parameters
l = parameters['lx_total_beam']
EI = parameters['E_Modul'] * parameters['Iy']
A = parameters['cross_section_area']
rho = parameters['material_density']
lambdas = [1.875, 4.694, 7.855]
f_j = np.zeros(3)
for i, l_i in enumerate(lambdas):
f_j[i] = np.sqrt((l_i**4 * EI)/(l**4 * rho *A)) / (2*np.pi)
return f_j
def analytic_eigenmode_shapes(beam_model):
# von https://me-lrt.de/eigenfrequenzen-eigenformen-beim-balken
#parameters = beam_model.parameters
l = beam_model.parameters['lx_total_beam']
x = beam_model.nodal_coordinates['x0']
lambdas = [1.875, 4.694, 7.855] # could also be computed as seen in the link
w = []
for j in range(3):
zeta = lambdas[j] * x / l
a = (np.sin(lambdas[j]) - np.sinh(lambdas[j])) / (np.cos(lambdas[j]) + np.cosh(lambdas[j]))
w_j = np.cos(zeta) - np.cosh(zeta) + a*(np.sin(zeta) -np.sinh(zeta))
w.append(w_j)
reduced_m = beam_model.m[::2,::2]
gen_mass = np.dot(np.dot(w_j, reduced_m), w_j)
norm_fac = np.sqrt(gen_mass)
w_j /= norm_fac
is_unity = np.dot(np.dot(w_j, reduced_m), w_j)
if round(is_unity, 4) != 1.0:
raise Exception ('analytic mode shape normalization failed')
return w
def get_eigenform_polyfit(modeshape_i, z_coords, evaluate_at, degree = 5, plot_compare = False):
'''
    - modeshape_i: all modal deformations as a 2D array, each column belongs to one dof
    - z_coords: the original floor levels from the 45 floor model
    - evaluate_at: nodal coordinates at which the fitted curve should be evaluated
-> this is returned
'''
eigenmodes_fitted = {}
#CAARC_eigenmodes = self.structure_model.CAARC_eigenmodes
# returns the fitted polynomial and the discrete array of displacements
if not evaluate_at.any():
        raise Exception('provide evaluation coordinates for the eigenform')
else:
x = evaluate_at
eigenmodes_fitted['storey_level'] = np.copy(x)
eigenmodes_fitted['eigenmodes'] = {}
dof_direction_map = {'y':4, 'z':3,'a':2}
for dof_label in ['y', 'z', 'a']:
y = modeshape_i[:, dof_direction_map[dof_label]]
current_polynomial = np.poly1d(np.polyfit(z_coords,y,degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
if values[0] != 0.0:
values[0] = 0.0
eigenmodes_fitted['eigenmodes'][dof_label] = np.asarray(values)
if plot_compare:
fig, ax = plt.subplots(ncols=3, num='fitted compared')
for d_i, dof_label in enumerate(['y', 'z', 'a']):
ax[d_i].plot(modeshape_i[:, dof_direction_map[dof_label]], z_coords, label = 'origin ' + dof_label)
ax[d_i].plot(eigenmodes_fitted['eigenmodes'][dof_label], x, label = 'fitted ' + dof_label)
ax[d_i].legend()
ax[d_i].grid()
plt.show()
return eigenmodes_fitted
def save_optimized_beam_parameters(opt_beam_model, fname):
new = 'optimized_parameters'+os_sep+fname+'.json'
if os.path.isfile(new):
print('WARNING', new, 'already exists!')
new = 'optimized_parameters'+os_sep+fname+'_1.json'
f = open(new,'w')
json.dump(opt_beam_model.parameters, f)
f.close()
print('\nsaved:', new)
def get_targets(beam_model, target='semi_realistic', opt_params =None):
'''
just used to have them especially for the initial comparison
'''
if target == 'realistic':
modi = np.load(os_join(*['inputs', 'EigenvectorsGid.npy']))
z_coords = np.load(os_join(*['inputs', 'z_coords_gid_45.npy']))
modi_fitted = get_eigenform_polyfit(modi[0], z_coords, beam_model.nodal_coordinates['x0'], plot_compare=False)
eigenmodes_target_y = modi_fitted['eigenmodes']['y']
eigenmodes_target_a = -1*modi_fitted['eigenmodes']['a']
elif target == 'semi_realistic':
ratio_a_y = opt_params['ratio_a_y_tar']
factor_y = opt_params['factor_y']
eigenmodes_target_y = beam_model.eigenmodes['y'][0]*factor_y
a_factor = ratio_a_y * max(eigenmodes_target_y)/max(beam_model.eigenmodes['a'][0])
eigenmodes_target_a = beam_model.eigenmodes['a'][0] * a_factor
return {'y':eigenmodes_target_y, 'a':eigenmodes_target_a}
def prepare_string_for_latex(string):
greek = {'ya':'y'+r'\alpha','ga':r'\gamma' + r'\alpha'}
if '_' in string:
var, label = string.split('_')[0], string.split('_')[1]
latex = r'${}$'.format(var) + r'$_{{{}}}$'.format(greek[label])
#return string.replace('_','')
return latex
else:
return string
def join_whitespaced_string(string):
return string.replace(' ','_')
# # DYNAMIC ANALYSIS
def get_fft(given_series, sampling_freq):
'''
The function get_fft estimates the Fast Fourier transform of the given signal
sampling_freq = 1/dt
'''
signal_length=len(given_series)
freq_half = np.arange(0,
sampling_freq/2 - sampling_freq/signal_length + sampling_freq/signal_length,
sampling_freq/signal_length)
# single sided fourier
series_fft = np.fft.fft(given_series)
series_fft = np.abs(series_fft[0:int(np.floor(signal_length/2))])/np.floor(signal_length/2)
max_length = len(freq_half)
if max_length < len(series_fft):
max_length = len(series_fft)
freq_half = freq_half[:max_length-1]
series_fft = series_fft[:max_length-1]
return freq_half, series_fft
def extreme_value_analysis_nist(given_series, dt, response_label = None, type_of_return = 'estimate', P1 = 0.98):
'''
    dynamic_analysis_solved: dynamic_analysis.solver object
    response: label given as dof_label; if given as a response label it is converted
    type_of_return: whether the estimated or the quantile value of P1 is returned (both are computed)
'''
T_series = dt * len(given_series)
dur_ratio = 600 / T_series
# # MAXMINEST NIST
#P1 = 0.98
max_qnt, min_qnt, max_est, min_est, max_std, min_std, Nupcross = stats_utils.maxmin_qnt_est(given_series ,
cdf_p_max = P1 , cdf_p_min = 0.0001, cdf_qnt = P1, dur_ratio = dur_ratio)
abs_max_est = max([abs(max_est[0][0]), abs(min_est[0][0])])
abs_max_qnt = max([abs(max_qnt[0]), abs(min_qnt[0])])
if type_of_return == 'estimate':
extreme_response = abs_max_est
elif type_of_return == 'quantile':
extreme_response = abs_max_qnt
glob_max = max(abs(given_series))
return abs_max_qnt, abs_max_est
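
# Usage sketch for get_fft (illustrative, assumes this module is run directly):
# recover the dominant frequency of a 2 Hz sine sampled at 100 Hz.
if __name__ == '__main__':
    dt = 0.01
    t = np.arange(0, 10, dt)
    freq, amp = get_fft(np.sin(2 * np.pi * 2.0 * t), 1 / dt)
    print('dominant frequency:', freq[np.argmax(amp)])  # ~2.0 Hz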
| StarcoderdataPython |
117935 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import redirect, render
from django.urls import reverse
import logs.ops
from dataops import ops, pandas_db
from ontask.permissions import is_instructor
from workflow.ops import get_workflow
from .forms import SelectColumnUploadForm, SelectKeysForm
@user_passes_test(is_instructor)
def upload_s2(request):
"""
The four step process will populate the following dictionary with name
upload_data (divided by steps in which they are set
ASSUMES:
initial_column_names: List of column names in the initial file.
column_types: List of column types as detected by pandas
src_is_key_column: Boolean list with src columns that are unique
step_1: URL name of the first step
CREATES:
rename_column_names: Modified column names to remove ambiguity when
merging.
columns_to_upload: Boolean list denoting the columns in SRC that are
marked for upload.
:param request: Web request
:return: the dictionary upload_data in the session object
"""
workflow = get_workflow(request)
if not workflow:
return redirect('workflow:index')
# Get the dictionary to store information about the upload
# is stored in the session.
upload_data = request.session.get('upload_data', None)
if not upload_data:
# If there is no object, or it is an empty dict, it denotes a direct
# jump to this step, get back to the dataops page
return redirect('dataops:list')
# Get the column names, types, and those that are unique from the data frame
try:
initial_columns = upload_data.get('initial_column_names')
column_types = upload_data.get('column_types')
src_is_key_column = upload_data.get('src_is_key_column')
except KeyError:
# The page has been invoked out of order
return redirect(upload_data.get('step_1', 'dataops:list'))
# Get or create the list with the renamed column names
rename_column_names = upload_data.get('rename_column_names', None)
if rename_column_names is None:
rename_column_names = initial_columns[:]
upload_data['rename_column_names'] = rename_column_names
# Get or create list of booleans identifying columns to be uploaded
columns_to_upload = upload_data.get('columns_to_upload', None)
if columns_to_upload is None:
columns_to_upload = [False] * len(initial_columns)
upload_data['columns_to_upload'] = columns_to_upload
# Bind the form with the received data (remember unique columns)
form = SelectColumnUploadForm(
request.POST or None,
column_names=rename_column_names,
is_key=src_is_key_column
)
load_fields = [f for f in form if f.name.startswith('upload_')]
newname_fields = [f for f in form if f.name.startswith('new_name_')]
# Create one of the context elements for the form. Pack the lists so that
# they can be iterated in the template
df_info = [list(i) for i in zip(load_fields,
initial_columns,
newname_fields,
column_types,
src_is_key_column)]
# Process the initial loading of the form and return
if request.method != 'POST':
# Update the dictionary with the session information
request.session['upload_data'] = upload_data
context = {'form': form,
'df_info': df_info,
'prev_step': reverse(upload_data['step_1']),
'wid': workflow.id}
if not ops.workflow_id_has_table(workflow.id):
# It is an upload, not a merge, set the next step to finish
context['next_name'] = 'Finish'
return render(request, 'dataops/upload_s2.html', context)
# At this point we are processing a POST request
# If the form is not valid, re-load
if not form.is_valid():
context = {'form': form,
'wid': workflow.id,
'prev_step': reverse(upload_data['step_1']),
'df_info': df_info}
if not ops.workflow_id_has_table(workflow.id):
# If it is an upload, not a merge, set next step to finish
context['next_name'] = 'Finish'
return render(request, 'dataops/upload_s2.html', context)
# Form is valid
# We need to modify upload_data with the information received in the post
for i in range(len(initial_columns)):
new_name = form.cleaned_data['new_name_%s' % i]
upload_data['rename_column_names'][i] = new_name
upload = form.cleaned_data['upload_%s' % i]
upload_data['columns_to_upload'][i] = upload
# Update the dictionary with the session information
request.session['upload_data'] = upload_data
# Load the existing DF or None if it doesn't exist
existing_df = pandas_db.load_from_db(workflow.id)
if existing_df is not None:
# This is a merge operation, so move to Step 3
return redirect('dataops:upload_s3')
# This is an upload operation (not a merge) save the uploaded dataframe in
# the DB and finish.
# Get the uploaded data_frame
try:
data_frame = ops.load_upload_from_db(workflow.id)
except Exception:
return render(
request,
'error.html',
{'message': 'Exception while retrieving the data frame'})
# Update the data frame
status = ops.perform_dataframe_upload_merge(workflow.id,
existing_df,
data_frame,
upload_data)
if status:
# Something went wrong. Flag it and reload
context = {'form': form,
'wid': workflow.id,
'prev_step': reverse(upload_data['step_1']),
'df_info': df_info}
return render(request, 'dataops/upload_s2.html', context)
# Nuke the temporary table
pandas_db.delete_upload_table(workflow.id)
# Log the event
col_info = workflow.get_column_info()
logs.ops.put(request.user,
'workflow_data_upload',
workflow,
{'id': workflow.id,
'name': workflow.name,
'num_rows': workflow.nrows,
'num_cols': workflow.ncols,
'column_names': col_info[0],
'column_types': col_info[1],
'column_unique': col_info[2]})
# Go back to show the detail of the data frame
return redirect('dataops:list')
@user_passes_test(is_instructor)
def upload_s3(request):
"""
Step 3: This is already a merge operation (not an upload)
The columns to merge have been selected and renamed. The data frame to
merge is called src.
In this step the user selects the unique keys to perform the merge,
the join method, and what to do with the columns that overlap (rename or
override)
ASSUMES:
initial_column_names: List of column names in the initial file.
column_types: List of column types as detected by pandas
src_is_key_column: Boolean list with src columns that are unique
step_1: URL name of the first step
rename_column_names: Modified column names to remove ambiguity when
merging.
columns_to_upload: Boolean list denoting the columns in SRC that are
marked for upload.
CREATES:
dst_column_names: List of column names in destination frame
dst_is_unique_column: Boolean list with dst columns that are unique
dst_unique_col_names: List with the column names that are unique
dst_selected_key: Key column name selected in DST
src_selected_key: Key column name selected in SRC
how_merge: How to merge. One of {left, right, outter, inner}
how_dup_columns: How to handle column overlap
autorename_column_names: Automatically modified column names
:param request: Web request
:return: the dictionary upload_data in the session object
"""
# Get the workflow id we are processing
workflow = get_workflow(request)
if not workflow:
return redirect('workflow:index')
# Get the dictionary to store information about the upload
# is stored in the session.
upload_data = request.session.get('upload_data', None)
if not upload_data:
# If there is no object, someone is trying to jump directly here.
return redirect('dataops:list')
# Get column names in dst_df
dst_column_names = upload_data.get('dst_column_names', None)
if not dst_column_names:
dst_column_names = workflow.get_column_names()
upload_data['dst_column_names'] = dst_column_names
# Array of booleans saying which columns are unique in the dst DF.
dst_is_unique_column = upload_data.get('dst_is_unique_column')
if dst_is_unique_column is None:
dst_is_unique_column = workflow.get_column_unique()
upload_data['dst_is_unique_column'] = dst_is_unique_column
# Array of unique col names in DST
dst_unique_col_names = upload_data.get('dst_unique_col_names', None)
if dst_unique_col_names is None:
dst_unique_col_names = [v for x, v in enumerate(dst_column_names)
if dst_is_unique_column[x]]
upload_data['dst_unique_col_names'] = dst_unique_col_names
# Get the column names of the unique columns to upload in the DF to
# merge (source)
columns_to_upload = upload_data['columns_to_upload']
src_column_names = upload_data['rename_column_names']
src_is_key_column = upload_data['src_is_key_column']
src_unique_col_names = [v for x, v in enumerate(src_column_names)
if src_is_key_column[x] and columns_to_upload[x]]
# Calculate the names of columns that overlap between the two data
# frames. It is the intersection of the column names that are not key in
# the existing data frame and those in the source DF that are selected,
# renamed and not unique
rename_column_names = upload_data['rename_column_names']
    are_overlap_cols = (
        # DST column names that are not keys
        (set(dst_column_names) - set(dst_unique_col_names)) &
        # SRC column names that are renamed, selected and not unique
        set([x for x, y, z in zip(rename_column_names,
                                  columns_to_upload,
                                  src_is_key_column)
             if y and not z])
    ) != set([])
# Bind the form with the received data (remember unique columns and
# preselected keys.)'
form = SelectKeysForm(
request.POST or None,
dst_keys=dst_unique_col_names,
src_keys=src_unique_col_names,
src_selected_key=upload_data.get('src_selected_key', None),
dst_selected_key=upload_data.get('dst_selected_key', None),
how_merge=upload_data.get('how_merge', None),
how_dup_columns=upload_data.get('how_dup_columns', None),
are_overlap_cols=are_overlap_cols,
)
# Process the initial loading of the form
if request.method != 'POST':
# Update the dictionary with the session information
request.session['upload_data'] = upload_data
return render(request, 'dataops/upload_s3.html',
{'form': form,
'prev_step': reverse('dataops:upload_s2'),
'wid': workflow.id})
# We are processing a post request with the information given by the user
# If the form is not valid, re-visit (nothing is checked so far...)
if not form.is_valid():
return render(request, 'dataops/upload_s3.html',
{'form': form,
'prev_step': reverse('dataops:upload_s3')})
# Get the keys and merge method and store them in the session dict
upload_data['dst_selected_key'] = form.cleaned_data['dst_key']
upload_data['src_selected_key'] = form.cleaned_data['src_key']
upload_data['how_merge'] = form.cleaned_data['how_merge']
upload_data['how_dup_columns'] = \
form.cleaned_data.get('how_dup_columns', None)
# Check if there are overlapping columns and if rename was selected as
# the method to deal with them. If so, create a list with the new
# names (adding a numeric suffix)
how_dup_columns = form.cleaned_data.get('how_dup_columns', None)
autorename_column_names = []
if are_overlap_cols and how_dup_columns == 'rename':
# Columns must be renamed!
# Initially the new list is identical to the previous one
autorename_column_names = rename_column_names[:]
for idx, col in enumerate(rename_column_names):
# Skip the selected keys
if col == upload_data['src_selected_key']:
continue
# If the column name is not in dst, no need to rename
if col not in dst_column_names:
continue
# Column with a name that collides with one in the DST
i = 0 # Suffix to rename
while True:
i += 1
new_name = col + '_{0}'.format(i)
if new_name not in rename_column_names and \
new_name not in dst_column_names:
break
# Record the new created name in the resulting list
autorename_column_names[idx] = new_name
# Remember the autorename list for the next step
upload_data['autorename_column_names'] = autorename_column_names
# Update session object
request.session['upload_data'] = upload_data
return redirect('dataops:upload_s4')
@user_passes_test(is_instructor)
def upload_s4(request):
"""
Step 4: Show the user the expected effect of the merge and perform it.
ASSUMES:
initial_column_names: List of column names in the initial file.
column_types: List of column types as detected by pandas
src_is_key_column: Boolean list with src columns that are unique
step_1: URL name of the first step
rename_column_names: Modified column names to remove ambiguity when
merging.
columns_to_upload: Boolean list denoting the columns in SRC that are
marked for upload.
dst_column_names: List of column names in destination frame
dst_is_unique_column: Boolean list with dst columns that are unique
dst_unique_col_names: List with the column names that are unique
dst_selected_key: Key column name selected in DST
src_selected_key: Key column name selected in SRC
how_merge: How to merge. One of {left, right, outter, inner}
how_dup_columns: How to handle column overlap
autorename_column_names: Automatically modified column names
override_columns_names: Names of dst columns that will be overridden in
merge
:param request: Web request
:return:
"""
# Get the workflow id we are processing
workflow = get_workflow(request)
if not workflow:
return redirect('workflow:index')
# Get the dictionary containing the information about the upload
upload_data = request.session.get('upload_data', None)
if not upload_data:
# If there is no object, someone is trying to jump directly here.
return redirect('dataops:list')
# Check the type of request that is being processed
if request.method == 'POST':
# We are processing a POST request
# Get the dataframes to merge
try:
dst_df = pandas_db.load_from_db(workflow.id)
src_df = ops.load_upload_from_db(workflow.id)
except Exception:
return render(request,
'error.html',
{'message': 'Exception while loading data frame'})
# Performing the merge
status = ops.perform_dataframe_upload_merge(workflow.id,
dst_df,
src_df,
upload_data)
# Nuke the temporary table
pandas_db.delete_upload_table(workflow.id)
col_info = workflow.get_column_info()
if status:
logs.ops.put(request.user,
'workflow_data_failedmerge',
workflow,
{'id': workflow.id,
'name': workflow.name,
'num_rows': workflow.nrows,
'num_cols': workflow.ncols,
'column_names': col_info[0],
'column_types': col_info[1],
'column_unique': col_info[2],
'error_msg': status})
messages.error(request,
'Merge operation failed. (' + status + ')'),
return redirect(reverse('dataops:list'))
# Log the event
logs.ops.put(request.user,
'workflow_data_merge',
workflow,
{'id': workflow.id,
'name': workflow.name,
'num_rows': workflow.nrows,
'num_cols': workflow.ncols,
'column_names': col_info[0],
'column_types': col_info[1],
'column_unique': col_info[2]})
# Remove the csvupload from the session object
request.session.pop('upload_data', None)
return redirect('dataops:list')
# We are processing a GET request
# Create the information to include in the final report table
dst_column_names = upload_data['dst_column_names']
src_selected_key = upload_data['src_selected_key']
# Triplets to show in the page (dst column, Boolean saying there is some
# change, and the message on the src column
autorename_column_names = upload_data['autorename_column_names']
rename_column_names = upload_data['rename_column_names']
info = []
initial_column_names = upload_data['initial_column_names']
# Create the strings to show in the table for each of the rows explaining
# what is going to be the effect of the merge operation over them.
override_columns_names = set([])
for idx, (x, y, z) in enumerate(zip(initial_column_names,
rename_column_names,
upload_data['columns_to_upload'])):
# There are several possible cases
#
# 1) The unique key. No message needed because it is displayed at
# the top of the rows
# 2) The column has not been selected. Simply show (Ignored) in the
# right.
# 3) Column is selected and is NEW
# 4) Column is selected and was renamed by the user
# 5) Column is selected and was automatically renamed by the tool
# when requesting to preserve the overlapping columns
# CASE 1: If it is a key (compare the rename value in case user tried
# to rename it.
if y == src_selected_key:
continue
# CASE 2: Column not selected, thus simply print "Ignored")
if not z:
info.append(('', False, x + ' (Ignored)'))
continue
# Calculate the final name after the renaming
final_name = x
suffix = ''
# Logic to figure out the final name after renaming
if y != x:
# If the corresponding name in rename_column_names is different,
# change
final_name = y
# To add to the column
suffix = ', Renamed'
# If autorename table exists, and the new name is different,
# rename again
if autorename_column_names and autorename_column_names[idx] != y:
final_name = \
autorename_column_names[idx]
suffix = ', Automatically renamed'
else:
# Check if there was autorename
if autorename_column_names and \
autorename_column_names[idx] != x:
final_name = \
autorename_column_names[idx]
suffix = ', Automatically renamed'
if final_name in dst_column_names:
suffix = ' (Override' + suffix + ')'
override_columns_names.add(final_name)
else:
suffix = ' (New' + suffix + ')'
info.append((final_name + suffix, True, x))
# Store the value in the request object and update
upload_data['override_columns_names'] = list(override_columns_names)
request.session['upload_data'] = upload_data
return render(request, 'dataops/upload_s4.html',
{'prev_step': reverse('dataops:upload_s3'),
'info': info,
'next_name': 'Finish'})
| StarcoderdataPython |
3348586 | <gh_stars>0
from typing import Optional, List
from .base import Command
from ..store import Store
class Generate(Command):
def __init__(self, workspace: str, store: Store, source: dict):
super().__init__(workspace)
self.store = store
self.source = source
def execute(self):
data = self.read_data()
if data is None:
data = {}
store = self.store.read()
self.merge_data(data, store)
self.write_data(data)
def merge_data(self, data: dict, store: dict):
data["source"] = {
**data.get("source", {}),
**self.source,
}
self._sort_store(store)
existing_stores = data.get("stores", [])
existing_store = self._find_by_name(existing_stores, store)
if not existing_store:
data["stores"] = existing_stores + [store]
return
# Leave unrelated stores, copy keys from new store to existing
self._copy_keys(
source=store,
target=existing_store,
exclude=("tables", "type"),
)
tables = store.get("tables", [])
for table in tables:
existing_table = self._find_by_name(existing_store.get("tables", []), table)
if existing_table:
self._copy_keys(
source=existing_table,
target=table,
exclude=("fields", "schema"),
)
for field in table.get("fields", []):
existing_field = self._find_by_name(
existing_table.get("fields", []), field
)
if existing_field:
self._copy_keys(
source=existing_field,
target=field,
exclude=("data_type", "nullable", "primary_key", "default"),
)
existing_store["tables"] = tables
@staticmethod
def _sort_store(store: dict):
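        # Sort fields by ordinal and tables by name so generated output is deterministic.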
tables = store.get("tables", [])
for table in tables:
table.get("fields", []).sort(key=lambda x: x.get(("ord",)))
tables.sort(key=lambda x: x["name"])
@staticmethod
def _find_by_name(items: List[dict], matching: dict, name="name") -> Optional[dict]:
for item in items:
if item[name] == matching[name]:
return item
return None
@staticmethod
def _copy_keys(source: dict, target: dict, exclude=()):
for key, val in source.items():
if key not in exclude:
target[key] = val
| StarcoderdataPython |
1741286 | from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import sys
import datetime
DEFAULT_PAGE_FILENAME = 'index.html'
SERVER_PORT = 8080
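
# Minimal HTTP honeypot: answers every GET/POST/HEAD with a canned page and
# emits one JSON line per request (headers, path, client IP, timestamp) on stdout.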
class HTTPHoneypot(BaseHTTPRequestHandler):
def __init__(self, *args):
with open(DEFAULT_PAGE_FILENAME, 'rb') as fi:
self.default_page = fi.read()
super(HTTPHoneypot, self).__init__(*args)
def log_message(self, msg_format, *args):
return
@staticmethod
def publisher(data):
print(json.dumps(data))
sys.stdout.flush()
def prepare_response_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def process_request(self, publisher, request_type):
result = dict()
result['request_type'] = request_type
result['headers'] = self.headers.as_string()
result['path'] = self.path
result['client_ip'] = self.client_address[0]
result['ts'] = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
if request_type == 'POST':
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
result['post_body'] = post_body.decode('latin-1')
publisher(result)
# noinspection PyPep8Naming
def do_HEAD(self):
self.process_request(self.publisher, 'HEAD')
self.prepare_response_headers()
# noinspection PyPep8Naming
def do_GET(self):
self.process_request(self.publisher, 'GET')
self.prepare_response_headers()
self.wfile.write(self.default_page)
# noinspection PyPep8Naming
def do_POST(self):
self.process_request(self.publisher, 'POST')
self.prepare_response_headers()
self.wfile.write(self.default_page)
def main():
print('Starting the server at port %s' % SERVER_PORT)
sys.stdout.flush()
httpd = HTTPServer(('', SERVER_PORT), HTTPHoneypot)
httpd.serve_forever()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3228674 | import unittest
import networkx as nx
import pandas as pd
import goenrich
from goenrich.enrich import propagate
class TestPropagationExample(unittest.TestCase):
def test_correctness_on_small_example(self):
r""" Example graph
r
/ \
c1 c2
\ / \
\ / \
c3 c4
"""
O = nx.DiGraph([('c4', 'c2'), ('c3', 'c1'), ('c3', 'c2'),
('c1', 'r'), ('c2', 'r')])
r = set([6])
c1 = set([])
c2 = set([4,5])
c3 = set([1,2,3])
c4 = set([0])
x = { 'r' : r, 'c1' : c1, 'c2' : c2, 'c3' : c3, 'c4' : c4 }
b = 'background'
propagate(O, x, b)
self.assertSetEqual(O.nodes['c3'][b], c3)
self.assertSetEqual(O.nodes['c2'][b], c4 | c3 | c2)
self.assertSetEqual(O.nodes['c1'][b], c3 | c1)
self.assertSetEqual(O.nodes['r'][b], c4 | c3 | c2 | c1 | r)
class TestPropagationReal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestPropagationReal, self).__init__(*args, **kwargs)
self.gene2go = goenrich.read.gene2go('db/gene2go.gz')
self.O = goenrich.obo.ontology('db/go-basic.obo')
def test_on_gene2go_head(self):
test = self.gene2go.head(100)
values = {k: set(v) for k,v in test.groupby('GO_ID')['GeneID']}
propagate(self.O, values, 'head')
def test_if_runs_trough_on_real_data(self):
values = {k: set(v) for k,v in self.gene2go.groupby('GO_ID')['GeneID']}
propagate(self.O, values, 'real')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1700518 | import torch
import torch.nn as nn
def get_network_for_size(size):
"""
Size is expected to be [channel, dim, dim]
"""
size = list(size) # In case the input is a tuple
if size[-2:] == [7, 7]:
net = ConvNet7x7
elif size[-2:] == [28, 28]:
net = ConvNet28x28
elif size[-2:] == [84, 84]:
net = ConvNet84x84
elif size[-2:] == [64, 64]:
# just use 84x84, it should compute output dim
net = ConvNet84x84
else:
raise AttributeError("Unexpected input size")
return net(size)
class ModelUtils(object):
"""
Allows for images larger than their stated minimums, and will auto-compute the output size accordingly
"""
@classmethod
def compute_output_size(cls, net, observation_size):
dummy_input = torch.zeros(observation_size).unsqueeze(0) # Observation size doesn't include batch, so add it
dummy_output = net(dummy_input).squeeze(0) # Remove batch
output_size = dummy_output.shape[0]
return output_size
class CommonConv(nn.Module):
def __init__(self, conv_net, post_flatten, output_size):
super().__init__()
self._conv_net = conv_net
self._post_flatten = post_flatten
self.output_size = output_size
def forward(self, x):
x = self._conv_net(x.float())
x = self._post_flatten(x)
return x
class ConvNet84x84(CommonConv):
def __init__(self, observation_shape):
# This is the same as used in AtariNet in Impala (torchbeast implementation)
output_size = 512
conv_net = nn.Sequential(
nn.Conv2d(in_channels=observation_shape[0], out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten())
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
class ConvNet28x28(CommonConv):
def __init__(self, observation_shape):
output_size = 32
conv_net = nn.Sequential(
nn.Conv2d(observation_shape[0], 24, kernel_size=5),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(), # TODO: this is new... (check)
nn.Conv2d(24, 48, kernel_size=5),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
nn.Flatten(),
)
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
class ConvNet7x7(CommonConv):
def __init__(self, observation_shape):
# From: https://github.com/lcswillems/rl-starter-files/blob/master/model.py, modified by increasing each
# latent size (2x)
output_size = 64
conv_net = nn.Sequential(
nn.Conv2d(observation_shape[0], 32, kernel_size=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(32, 64, kernel_size=2),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=2),
nn.ReLU(),
nn.Flatten()
)
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
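
# Usage sketch (assumes a 3-channel 84x84 observation): the returned module
# exposes its flattened feature dimension as `output_size`.
if __name__ == "__main__":
    net = get_network_for_size([3, 84, 84])
    feats = net(torch.zeros(1, 3, 84, 84))
    print(feats.shape, net.output_size)  # torch.Size([1, 512]) 512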
| StarcoderdataPython |
1723620 | <gh_stars>1-10
#!/usr/bin/python
import docker
import sys
import os
imagename = sys.argv[1]
childname = sys.argv[2]
port = sys.argv[3]
msg = sys.argv[4]
own_dir = os.path.split(__file__)[0]
print "booting image %s with name %s listen to %s/%s" % (imagename, childname, port, msg)
container = docker.from_env().containers.run(
imagename,
["python","/test/socket_listener.py", port, msg],
volumes={own_dir:"/test"},
name=childname,
detach=True
)
print "child created container ", container
res = container.wait()
print "Got result", res, container.logs(stdout=True,stderr=True)
if res:
print "FAILED with code ", res, ": ", container.logs(stdout=True,stderr=True)
sys.stdout.flush()
sys.stderr.flush()
sys.exit(res)
| StarcoderdataPython |
1671743 | <reponame>xiayzh/MH-MDGM
import argparse
import os
import numpy as np
import itertools
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import torch.nn as nn
import torch.nn.functional as F
import torch
import h5py
from load_data import load_data_1scale
from plot import plot_generation,plot_reconstruction
from CONV_VAE_model import Encoder, Decoder
parser = argparse.ArgumentParser()
parser.add_argument('--exp',type = str ,default = 'Channel_64',help = 'dataset')
parser.add_argument("--n-epochs", type=int, default=50, help="number of epochs of training")
parser.add_argument('--n-train', type=int, default=40000, help='number of training data')
parser.add_argument('--n-test', type=int, default=200, help='number of test data')
parser.add_argument("--batch-size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--beta1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--beta2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--sample-interval", type=int, default=1, help="interval between image sampling")
parser.add_argument("--beta_vae", type=float, default=0.5, help="beta hyperparameter")
args = parser.parse_args()
dir = os.getcwd()
directory = f'/Channel/experiments/experiments_64/latent256/beta_{args.beta_vae}'
exp_dir = dir + directory + "/N{}_Bts{}_Eps{}_lr{}".\
format(args.n_train, args.batch_size, args.n_epochs, args.lr)
output_dir = exp_dir + "/predictions"
model_dir = exp_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
encoder = Encoder()
decoder = Decoder()
encoder.to(device)
decoder.to(device)
print("number of parameters: {}".format(encoder._n_parameters()+decoder._n_parameters()))
train_hdf5_file = os.getcwd() + \
f'/Channel/data/training_set_64.hdf5'
test_hdf5_file = os.getcwd() + \
f'/Channel/data/test_set_64.hdf5'
train_loader = load_data_1scale(train_hdf5_file, args.n_train, args.batch_size,singlescale=True)
with h5py.File(test_hdf5_file, 'r') as f:
x_test = f['test'][()]
optimizer= torch.optim.Adam(
itertools.chain(encoder.parameters(), decoder.parameters()), lr=args.lr, betas=(args.beta1, args.beta2))
def test(epoch,x_test):
encoder.eval()
decoder.eval()
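    # Draw latent codes from the standard normal prior and decode them to
    # inspect unconditional samples from the generator.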
    z = torch.randn(9, 1, 16, 16).to(device)
gen_imgs = decoder(z)
samples = np.squeeze(gen_imgs.data.cpu().numpy())
plot_generation(samples,epoch,output_dir)
real_imgs = x_test[[10,30,50,100]]
real_imgs = (torch.FloatTensor(real_imgs)).to(device)
encoded_imgs,_,_ = encoder(real_imgs)
decoded_imgs = decoder(encoded_imgs)
samples_gen1 = np.squeeze(decoded_imgs.data.cpu().numpy())
samples_real1 = np.squeeze(real_imgs.data.cpu().numpy())
samples = np.vstack((samples_real1,samples_gen1))
plot_reconstruction(samples,epoch,output_dir)
def loss_function(recon_x, x, mu, logvar):
# see Appendix B from VAE paper:
# <NAME>. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
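    # Reconstruction term: summed squared error over the flattened 64x64 images
    # (4096 pixels). The KL term below is the closed form for a diagonal
    # Gaussian against the standard normal prior; beta_vae rescales it as in
    # the beta-VAE objective.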
    BCE = F.mse_loss(recon_x.view(-1, 4096), x.view(-1, 4096), reduction='sum')
mu=mu.reshape(-1,256)
logvar=logvar.reshape(-1,256)
KLD = torch.sum(-0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim = 1), dim = 0)
return BCE + args.beta_vae*KLD, BCE , KLD
# ----------#
# Training #
# ----------#
for epoch in range(1,args.n_epochs+1):
encoder.train()
decoder.train()
train_loss = 0
for batch_idx, (data, ) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
z, mu, logvar = encoder(data)
recon_batch = decoder(z)
loss,rec_loss, kl_loss= loss_function(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
optimizer.step()
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} recon_loss:{:.6f} kl_loss:{:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data),rec_loss.item() / len(data),kl_loss.item() / len(data)))
batches_done = epoch * len(train_loader) + batch_idx
if (epoch) % args.sample_interval == 0:
test(epoch,x_test)
torch.save(decoder.state_dict(), model_dir + f'/decoder_64_VAE_{args.beta_vae}_epoch{epoch}.pth')
torch.save(encoder.state_dict(), model_dir + f'/encoder_64_VAE_{args.beta_vae}_epoch{epoch}.pth')
| StarcoderdataPython |
1622157 | <filename>tools/barcode_tools/helper_functions.py
#!/usr/bin/env python3
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import multiprocessing
import os
import subprocess
import sys
_DEFAULT_PADDING = 4
class HelperError(Exception):
"""Exception raised for errors in the helper."""
pass
def zero_pad(number, padding=_DEFAULT_PADDING):
"""Converts an int into a zero padded string.
Args:
number(int): The number to convert.
padding(int): The number of chars in the output. Note that if you pass for
example number=23456 and padding=4, the output will still be '23456',
i.e. it will not be cropped. If you pass number=2 and padding=4, the
return value will be '0002'.
Return:
(string): The zero padded number converted to string.
"""
return str(number).zfill(padding)
def run_shell_command(cmd_list, fail_msg=None):
"""Executes a command.
Args:
cmd_list(list): Command list to execute.
fail_msg(string): Message describing the error in case the command fails.
Return:
(string): The standard output from running the command.
Raise:
HelperError: If command fails.
"""
  process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, universal_newlines=True)
output, error = process.communicate()
if process.returncode != 0:
if fail_msg:
      print(fail_msg, file=sys.stderr)
raise HelperError('Failed to run %s: command returned %d and printed '
'%s and %s' % (' '.join(cmd_list), process.returncode,
output, error))
return output.strip()
def perform_action_on_all_files(directory, file_pattern, file_extension,
start_number, action, **kwargs):
"""Function that performs a given action on all files matching a pattern.
It is assumed that the files are named file_patternxxxx.file_extension, where
xxxx are digits starting from start_number.
Args:
directory(string): The directory where the files live.
file_pattern(string): The name pattern of the files.
file_extension(string): The files' extension.
start_number(int): From where to start to count frames.
action(function): The action to be performed over the files. Must return
False if the action failed, True otherwise. It should take a file name
as the first argument and **kwargs as arguments. The function must be
possible to pickle, so it cannot be a bound function (for instance).
Return:
(bool): Whether performing the action over all files was successful or not.
"""
file_prefix = os.path.join(directory, file_pattern)
file_number = start_number
process_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
results = []
while True:
zero_padded_file_number = zero_pad(file_number)
file_name = file_prefix + zero_padded_file_number + '.' + file_extension
if not os.path.isfile(file_name):
break
future = process_pool.apply_async(action, args=(file_name,), kwds=kwargs)
results.append(future)
file_number += 1
successful = True
for result in results:
if not result.get():
print "At least one action %s failed for files %sxxxx.%s." % (
action, file_pattern, file_extension)
successful = False
process_pool.close()
return successful
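

if __name__ == '__main__':
  # Minimal usage sketch (hypothetical directory and file pattern, not part of
  # the original tool): verify that frame_0000.txt, frame_0001.txt, ... exist.
  # os.path.isfile is picklable, so it works as the pooled action.
  print(perform_action_on_all_files('/tmp/frames', 'frame_', 'txt', 0,
                                    os.path.isfile))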
| StarcoderdataPython |
3217745 | <reponame>JanIIISobieski/bme590hrm
def test_import():
from heart_rate import ECG
first_set = ECG(filename='test_data1.csv')
assert first_set.time[0] == 0
assert first_set.time[-1] == 27.775
assert first_set.voltage[0] == -0.145
assert first_set.voltage[-1] == 0.72
second_set = ECG(filename='test_data27.csv')
assert second_set.time[0] == 0
assert second_set.time[-1] == 39.996
assert second_set.voltage[0] == -0.175
assert second_set.voltage[-1] == -1.7725
def test_attributes():
from heart_rate import ECG
first_set = ECG(filename='test_data8.csv')
assert first_set.voltage_extremes == (-3.105, 1.975)
assert first_set.duration == 27.775
second_set = ECG(filename='test_data18.csv')
assert second_set.voltage_extremes == (-0.19375, 0.7875)
assert second_set.duration == 13.887
def test_beat_finding():
import glob
import os
from heart_rate import ECG
csv_loc = os.path.join(os.path.dirname(__file__), '../test_data/*.csv')
num_beats_actual = [35, 19, 19, 32, 19, 19, 37, 74, 79, 29, 36, 44, 63, 35,
10, 34, 78, 19, 19, 33, 35, 37, 33, 32, 33, 28, 9, 4,
7, 7, 19, 19]
num_beats_found = []
for csv_file in glob.glob(csv_loc):
test = ECG(filename=os.path.basename(csv_file), export=True)
num_beats_found.append(len(test.beats))
tot_beats_actual = sum(num_beats_actual)
tot_beats_found = sum(num_beats_found)
assert abs((tot_beats_actual - tot_beats_found)/(tot_beats_actual)) < 0.01
# net one percent error in the finding of the heart beats was considered
# acceptable for the purposes of the assignment
def test_heart_rate():
from heart_rate import ECG
first_set = ECG(filename='test_data20.csv')
assert abs(int(first_set.mean_hr_bpm) - 81)/81 < 0.05
def test_export():
from heart_rate import ECG
import os
import json
import numpy as np
test = ECG()
json_loc = os.path.join(os.path.dirname(__file__),
'../JSON/test_data1.json')
with open(json_loc, 'r') as fp:
json_import = json.load(fp)
assert np.allclose(test.voltage, json_import['voltage'])
| StarcoderdataPython |
46367 | from django.contrib import admin
from django.urls import path
# from django.contrib.auth.decorators import login_required
# from rest_framework.urlpatterns import format_suffix_patterns
from . views import *
urlpatterns = [
# path('admin/', admin.site.urls),
path('', main_view, name='main_view'),
]
| StarcoderdataPython |
1665502 | from torch.distributed._sharding_spec import (
ChunkShardingSpec,
)
def generate_chunk_sharding_specs_for_test(sharding_dim):
return [
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
),
# Test different ordering. (Case 1)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
],
),
# Test different ordering. (Case 2)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
],
)
]
| StarcoderdataPython |
1773406 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
from unittest import TestCase
class TestBasic(TestCase):
def nop(self):
pass
| StarcoderdataPython |
3239806 | <filename>apps/users/migrations/0001_initial.py<gh_stars>0
# Generated by Django 4.0 on 2022-01-28 11:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, null=True)),
('username', models.CharField(blank=True, max_length=200, null=True)),
('bio', models.TextField(blank=True, null=True)),
('profile_image', models.ImageField(blank=True, default='profiles/user-default.png', null=True, upload_to='profiles/')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.user')),
],
),
]
| StarcoderdataPython |
1666639 | #! /usr/bin/env python
from distutils.core import setup, Extension
import os
import sys
pwd = os.getcwd()
project_top_dir = os.getenv('PROJECT_TOP_DIR')
if project_top_dir is None:
    project_top_dir = os.path.normpath(os.path.join(pwd, '..'))
include_dir = os.path.join( project_top_dir, 'include' )
src_python_dir = os.path.join( project_top_dir, 'python' )
lib_dir = os.getenv('LIB_DIR')
if lib_dir is None:
lib_dir = os.path.join( project_top_dir, 'tmp-target/build/lib' )
ccflags = os.getenv('CCFLAGS')
if ccflags is None:
ccflags = ''
module_sources = [ 'if3_kernel.cpp' ]
if3kernel = Extension(
'if3_kernel',
include_dirs = [ include_dir ],
library_dirs = [ lib_dir ],
libraries = [ 'if3kernel' ],
sources = [ os.path.join(src_python_dir,z) for z in module_sources ],
)
setup (name = 'if3_kernel',
version = '3.0',
description = 'Internet Filter Kernel by Turner and Sons Productions, Inc.',
ext_modules = [if3kernel])
| StarcoderdataPython |
1673467 | from setuptools import setup
import os
VERSION = "2.0"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="datasette-render-markdown",
description="Datasette plugin for rendering Markdown",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="<NAME>",
url="https://github.com/simonw/datasette-render-markdown",
license="Apache License, Version 2.0",
version=VERSION,
packages=["datasette_render_markdown"],
entry_points={"datasette": ["render_markdown = datasette_render_markdown"]},
install_requires=["datasette", "markdown", "bleach"],
extras_require={"test": ["pytest", "pytest-asyncio"]},
tests_require=["datasette-render-markdown[test]"],
)
| StarcoderdataPython |
1747512 | <filename>python/tvm/topi/testing/reorg_python.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Reorg in python"""
import numpy as np
def reorg_python(a_np, stride):
"""Reorg operator
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
stride : int
Stride size
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
a_np = np.reshape(a_np, batch * in_channel * in_height * in_width)
out_c = int(in_channel / (stride * stride))
out_channel = in_channel * stride * stride
out_height = int(in_height / stride)
out_width = int(in_width / stride)
b_np = np.zeros(batch * out_channel * out_height * out_width)
cnt = 0
for b in range(batch):
for k in range(in_channel):
for j in range(in_height):
for i in range(in_width):
c2 = k % out_c
offset = int(k / out_c)
w2 = int(i * stride + offset % stride)
h2 = int(j * stride + offset / stride)
out_index = int(
w2 + in_width * stride * (h2 + in_height * stride * (c2 + out_c * b))
)
b_np[cnt] = a_np[int(out_index)]
cnt = cnt + 1
b_np = np.reshape(b_np, (batch, out_channel, out_height, out_width))
return b_np
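

if __name__ == "__main__":
    # Quick self-check (not part of the original module): with stride 2, a
    # (1, 4, 4, 4) input becomes (1, 16, 2, 2) -- spatial size is traded for channels.
    x = np.random.rand(1, 4, 4, 4).astype("float32")
    print(reorg_python(x, 2).shape)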
| StarcoderdataPython |
1616595 | import json
import typing
from flask import Response as FlaskResponse
from cauldron import environ
from cauldron.cli import server
Responses = typing.NamedTuple('TestResponses', [
('flask', FlaskResponse),
('response', 'environ.Response')
])
def create_test_app():
"""..."""
return server.server_run.APPLICATION.test_client()
def get(app, endpoint: str, **kwargs) -> Responses:
""" send get request to the test flask application."""
flask_response = app.get(endpoint, **kwargs)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def post(app, endpoint: str, data=None, **kwargs) -> Responses:
""" send post request to the test flask application."""
args = json.dumps(data) if data else None
flask_response = app.post(
endpoint,
data=args,
content_type='application/json',
**kwargs
)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def deserialize_flask_response(
flask_response: FlaskResponse
) -> 'environ.Response':
"""..."""
try:
data = json.loads(flask_response.data.decode('utf-8', 'ignore'))
response = environ.Response.deserialize(data)
except Exception as error:
response = environ.Response().fail(
code='DESERIALIZE_FLASK_RESPONSE',
message='Failed to deserialize flask response',
error=error
)
return response
| StarcoderdataPython |
3350075 | <reponame>EllaDing/nlp-with-deep-learning
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Highway(nn.Module):
### YOUR CODE HERE for part 1f
def __init__(self, input_size):
"""
Init the Highway layers.
@param input_size(int): The size of input Tensor.
"""
super(Highway, self).__init__()
self.input_size = input_size
self.W_proj = nn.Linear(
in_features=self.input_size,
out_features=self.input_size,
bias=True)
self.W_gate = nn.Linear(
in_features=self.input_size,
out_features=self.input_size,
bias=True)
def forward(self, x_conv):
"""
@param x_conv(Tensor): Input tensor, with size (batch_size, embedding_size)
"""
x_proj = F.relu(self.W_proj(x_conv))
x_gate = torch.sigmoid(self.W_gate(x_conv))
return x_gate * x_proj + (1-x_gate) * x_conv
### END YOUR CODE
def main():
""" Main func.
"""
# seed the random number generators
seed = 10
torch.manual_seed(seed)
np.random.seed(seed * 13 // 7)
highway = Highway(3)
print(highway.W_proj.weight)
print(highway.W_proj.bias)
print(highway.W_gate.weight)
print(highway.W_gate.bias)
result = highway(torch.tensor([
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6]]))
print(result)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1786366 | import os
import sys
import zipfile
import django
import zookeeper
sys.path.append("/var/projects/museum/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from museum_site.models import File
def main():
files = File.objects.all().order_by("letter", "title")
z = zookeeper.Zookeeper()
for f in files:
#print(f.letter, f.filename)
try:
zf = zipfile.ZipFile(
"/var/projects/museum/zgames/" +
f.letter + "/" + f.filename
)
except (FileNotFoundError, zipfile.BadZipFile):
print("\tSkipping due to bad zip")
continue
file_list = zf.namelist()
for file in file_list:
name, ext = os.path.splitext(file)
ext = ext.upper()
if ext == ".COM": # Com File means a font to rip
zf.extract(file, path="/var/projects/museum/tools/extract")
# Rip the font
fname = os.path.join("/var/projects/museum/tools/extract", file)
try:
id = ("0000"+str(f.id))[-4:]
z.export_font(fname, "fonts/"+id+"-"+name+".png", 1)
print("Ripped", file, "as", f.id)
                except Exception:
print("Could not rip", file)
return True
if __name__ == "__main__":
main()
| StarcoderdataPython |
141350 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: luisgasco
Script to generate a terminology tsv file with concepts of SnomedCT given a list of seed codes.
You will get the concept
to normaliza clinical entities to controlled vocabularies
"""
import sys
import pandas as pd
import networkx as nx
import numpy as np
from optparse import OptionParser
def get_sucessors(g, code):
"""Get sucessors of a given code in a the given ontology g
Args:
g (Networkx DiGraph): Working ontology in Digraph format
code (str): Code in str format
Returns:
lst: List of strings of the successors of "code"
"""
return [key for key in g.successors(code)]
def get_rec(g,lista,recursive_list):
"""Recursive function to get sucessors of a givel list of codes in a given ontology "g".
Args:
g (Networkx DiGraph): Working ontology in Digraph format
        lista (lst): List of strings representing the codes whose successors are collected.
        recursive_list (lst): List used to accumulate successors across recursive calls.
    Returns:
        lst: List of strings of the successors of all codes in "lista"
"""
for a in lista:
sucessors=get_sucessors(g,a)
recursive_list.extend(sucessors)
get_rec(g, sucessors,recursive_list)
return recursive_list
def load_ontology(file_name_rel, root_concept_code="138875005", relation_types = "116680003"):
"""Function to load SnomecCT relationships from RF2 format to netowrkx model.
Args:
file_name_rel (str): Path to the SnomedCT Relationship file in RF2 format
root_concept_code (str, optional): snomed code of the code from which you want to generate
the ontology file (For example if we want the branch
"Pharmaceutical / biologic product" we would use the code
"373873005", if we want the whole snomed ontology we would
use the code "138875005").Defaults to "138875005".
relation_types (str, optional): Type of relationship to consider when building the ontology.
Use string "116680003" if you only want to consider "Is a"
relationships, use "all" if you want to consider all types
of relationships (including concept model attributes).Defaults to "116680003".
Returns:
Networkx DiGraph: SnomedCT model in a NetworkxDigraph format.
This code is based on the one written by @emreg00 (https://github.com/emreg00/toolbox/blob/master/parse_snomedct.py)
"""
ontology = nx.DiGraph()
f = open(file_name_rel)
header = f.readline().strip("\n")
col_to_idx = dict((val.lower(), i) for i, val in enumerate(header.split("\t")))
for line in f:
words = line.strip("\n").split("\t")
if relation_types == "116680003": #"Is a" relationship code
if words[col_to_idx["typeid"]] in relation_types:
source_id = words[col_to_idx["sourceid"]]
target_id = words[col_to_idx["destinationid"]]
ontology.add_edge(target_id, source_id)
else: # All
source_id = words[col_to_idx["sourceid"]]
target_id = words[col_to_idx["destinationid"]]
ontology.add_edge(target_id, source_id)
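    # Prune to the subtree reachable from the chosen root concept; codes
    # outside that branch are dropped.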
ontology = nx.dfs_tree(ontology, root_concept_code)
return ontology
# Function to parse comma-´separated values
def get_comma_separated_args(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
def main(argv=None):
parser = OptionParser()
parser.add_option("-r", "--rel_file", dest="rel_file",
help="Path to the SnomedCT Relationship file in RF2 format")
parser.add_option("-c", "--concept_file", dest = "concept_file", help = "")
parser.add_option("-o", "--output_file", dest="output_file", help="")
parser.add_option("-l", "--code_list", dest = "code_list", type=str, action="callback", callback=get_comma_separated_args, help="Codes to get ")
parser.add_option("--root_code", dest = "root_code", default = "138875005", type=str,help="")
parser.add_option("--rel_types", dest = "rel_types", default = "all", type=str,help="")
(options, args) = parser.parse_args(argv)
# Variables to be used
relationship_file = options.rel_file
concepts_file = options.concept_file
output_terminology = options.output_file
code_list = options.code_list
# Get ontology data:
print("Loading SnomedCT into a DiGraph Networkx object...")
g = load_ontology(relationship_file, root_concept_code = options.root_code, relation_types=options.rel_types)
print("Object loaded")
# Get sucessors from original codes
print("You are going to get the SnomedCT branches of the codes {}".format(code_list))
recursive_list = list()
lista_end = get_rec(g,code_list,recursive_list)
lista_end_int = [int(key) for key in lista_end]
# Load concepts
concepts_df = pd.read_csv(concepts_file, sep="\t")
# Filter concepts with sucessors found:
concepts_df_filt = concepts_df[concepts_df.conceptId.isin(lista_end_int)]
# Save data into output file as a term file.
concepts_df_filt[["conceptId","term"]].to_csv(output_terminology, index = False, sep="\t", header = False)
if __name__ == "__main__":
sys.exit(main()) | StarcoderdataPython |
1784498 | <reponame>Zhenye-Na/LxxxCode<gh_stars>10-100
class ListNode:
def __init__(self, key, val, next=None, prev=None):
self.key = key
self.val = val
self.next = next
self.prev = prev
class LRUCache:
def __init__(self, capacity: int):
self.cache_history = {}
self.head = ListNode(-1, -1)
self.tail = ListNode(-1, -1)
self.head.next = self.tail
self.tail.prev = self.head
self.capacity = capacity
def get(self, key: int) -> int:
if key not in self.cache_history:
return -1
node = self.cache_history[key]
self._remove(node)
self._push_back(node)
return node.val
def put(self, key: int, value: int) -> None:
if self.get(key) != -1:
self.cache_history[key].val = value
return
if len(self.cache_history) >= self.capacity:
self._pop_first()
node = ListNode(key, value)
self._push_back(node)
self.cache_history[key] = node
def _pop_first(self):
del self.cache_history[self.head.next.key]
self._remove(self.head.next)
def _push_back(self, node):
node.next = self.tail
self.tail.prev.next = node
node.prev = self.tail.prev
self.tail.prev = node
def _remove(self, node):
node.prev.next = node.next
node.next.prev = node.prev
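

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original solution file):
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    print(cache.get(1))  # 1 -- key 1 becomes the most recently used
    cache.put(3, 3)      # evicts key 2, the least recently used
    print(cache.get(2))  # -1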
| StarcoderdataPython |
3278634 | # -*- coding: UTF-8 -*-
from unittest import TestCase
class TestVtk(TestCase):
def test_ust_from_blk(self):
from nose.plugins.skip import SkipTest
try:
import vtk
except ImportError:
raise SkipTest
from ..testing import get_blk_from_sample_neu
from ..visual_vtk import make_ust_from_blk
ust = make_ust_from_blk(get_blk_from_sample_neu())
def test_set_celldata(self):
from nose.plugins.skip import SkipTest
try:
import vtk
except ImportError:
raise SkipTest
from numpy import empty
from ..testing import get_blk_from_sample_neu
from ..visual_vtk import make_ust_from_blk, set_array
blk = get_blk_from_sample_neu()
ust = make_ust_from_blk(blk)
arr = empty(blk.ncell, dtype='float32')
set_array(arr, 'test', 'float32', ust)
class TestVtkOperation(TestCase):
def test_import(self):
from nose.plugins.skip import SkipTest
try:
import vtk
except ImportError:
raise SkipTest
# it is the test.
from ..visual_vtk import VtkOperation, Vop
| StarcoderdataPython |
1651288 | from uio import FileIO
from component import Component, components
class FileSystem:
def __init__(self, address):
self.fs = Component(address, components()[address])
self.address = address
self.readonly = self.fs.isReadOnly()
self.cwd = "/"
# noinspection PyUnusedLocal
def mount(self, readonly, mkfs):
self.readonly = self.fs.isReadOnly() or readonly
def umount(self):
pass
def ilistdir(self, dir):
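        # Yield 4-tuples of (name, type, inode, size); 0x4000 marks a
        # directory entry and 0x8000 a regular file (stat.S_IFDIR / S_IFREG).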
for name in self.fs.list(dir):
if self.fs.isDirectory(dir + "/" + name):
yield (name, 0x4000, 0, -1)
else:
                size = self.fs.size(dir + "/" + name)
yield (name, 0x8000, 0, size)
def chdir(self, dir):
if not self.fs.isDirectory(dir):
raise OSError(1)
self.cwd = dir
def getcwd(self):
return self.cwd
def mkdir(self, path):
if self.readonly:
raise OSError(1)
result = self.fs.makeDirectory(path)
if not result:
exists = self.fs.exists(path)
if self.fs.isDirectory(path):
raise OSError(1)
elif exists: # file
raise OSError(1)
raise OSError(1)
def remove(self, path):
if self.readonly:
raise OSError(1)
self.fs.remove(path)
def rename(self, old_path, new_path):
if self.readonly:
raise OSError(1)
result = self.fs.rename(old_path, new_path)
if not result:
raise OSError(1)
def rmdir(self, path):
if self.readonly:
raise OSError(1)
if not self.fs.isDirectory(path):
if self.fs.exists(path):
# is file
raise OSError(1)
raise OSError(1)
result = self.fs.remove(path)
if not result:
raise OSError(1)
def stat(self, path):
if not self.fs.exists(path):
raise OSError(1)
is_dir = self.fs.isDirectory(path)
size = self.fs.size(path) if not is_dir else 0
mtime = self.fs.lastModified(path)
return (
0x4000 if is_dir else 0x8000, # st_mode
0, # st_ino
0, # dev
0, # nlink
0, # uid: root
0, # gid: root
size, # size
mtime, # atime
mtime, # mtime
mtime, # ctime
)
# noinspection PyUnusedLocal
def statvfs(self, path):
return (
0, # f_bsize
0, # f_frsize
0, # f_blocks
0, # f_bfree
0, # f_bavail
0, # f_files
0, # f_ffree
0, # f_favail
0, # f_flag
256, # f_namemax
)
def open(self, file, mode):
        # TODO: normalize mode
return FileIO(self.fs.address, file, mode)
def __repr__(self):
return "<FileSystem: {!r}>".format(self.address)
| StarcoderdataPython |
135041 | import torch
from torch import nn
rnn_units = 128
class Model(nn.Module):
def __init__(self, column_units):
super(Model, self).__init__()
self.cnn = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
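        # Each frame's CNN features are flattened to 16 * 16 * 16 = 4096
        # values, which is the per-step input size of the LSTM.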
self.rnn = nn.LSTM(16 * (32 // 2) * (32 // 2), rnn_units, batch_first=True)
self.classifier = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(rnn_units, column_units),
)
def forward(self, x):
x = x[0]
b, c, t, h, w = 0, 1, 2, 3, 4
x = x.permute(b, t, c, h, w).clone()
b, t, c, h, w = x.shape
x = x.view(-1, c, h, w)
x = self.cnn(x)
x = x.view(b, t, -1)
x, _ = self.rnn(x)
self.h = x.detach()
b, t, f = x.shape
x = torch.stack([self.classifier(x[:, i, :]) for i in range(t)])
return x
if __name__ == '__main__':
    model = Model(column_units=10)
inputs = torch.randn(8, 10, 3, 32, 32)
    outputs = model([inputs])  # forward expects a sequence whose first element is the batch tensor
print(outputs.shape)
| StarcoderdataPython |
3261943 | # Guessing game
# What does it do?
# We'll write a program that plays a guessing game with a user
# Would be useful to have the program print out the rules at the beginning
# How does the user communicate to the program that the program didn't guess the right number?
# We can have the user specify that the program's guess it too low, too high, or equal
# After the program makes a guess, it will wait on user input from the command line
# Player types in "low", "high", or "equal"
# Program rads in that input and acts on it
# If the player says the number is too high
# How does the program generate a guess?
# What are the rules?
# The user will think of a number, the program has to guess it
# Is there a range of possible numbers? Or can we allow any number?
# Let's say the range is between 1 and 100
print("Think of a number between 1 and 100, and I'll guess it.")
print("You have to tell me if my guess is less than, greater than, or equal to your number")
# How does the game actually run?
# Loop until the program exits
# When does the program exits?
# Either when the user terminates it, or when the program has successfully guessed the player's number
# variables to store the floor and ceiling of our range
floor = 1
ceiling = 100
# boolean value to indicate whether the program guessed the number or not
got_it = False
# In the loop
while not got_it:
# Game generates a guess between 1 and 100
# should it guess a random number between this range?
# should we have it guess a bit smarter?
# guess the halfway point between the range?
# we'll have it guess halfway between the range
# using a midpoint formula
# In Python, `//` that's floor division (always round down)
guess = (floor + ceiling) // 2
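    # Halving the remaining range each turn bounds the game at
    # ceil(log2(100)) = 7 guesses.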
# print out the guess to the user
print(f"I'm guessing {guess}.")
# wait for the user's input to tell the program if its guess was high, low, or equal
# read in user's input
result = input("Is my guess less, greater, or equal to your number?")
    # normalize the input so the comparison below is simple
# lowercase the result
result = result.lower()
# let's grab the first letter of the input
result = result[0]
# act according to user's input
    # if the player answered 'greater' (our guess is too high)
if result == 'g':
# we need to guess lower for the next guess
# narrow the range accordingly
# set the "ceiling" of the range equal to guess - 1
ceiling = guess - 1
    # if the player answered 'less' (our guess is too low)
elif result == 'l':
# we need to guess higher for next guess
# set the "floor" of the range to equal guess + 1
floor = guess + 1
# if input == 'equal'
elif result == 'e':
# print "I guessed your number!"
print("I guessed your number!")
# quit the game loop
got_it = True
# go on to the next loop iteration
# our range has been updated according to the user's input | StarcoderdataPython |
47015 | <gh_stars>1-10
import os
import re
import json
import argparse
import random
import numpy as np
import torch
import experiments.experiment_langid as experiment_lid
import experiments.experiment_ner as experiment_ner
import experiments.experiment_pos as experiment_pos
from types import SimpleNamespace as Namespace
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Arguments(object):
def __init__(self, config_path=None):
if config_path is None:
parser = argparse.ArgumentParser()
parser.add_argument('--config', help="provide a relative path to a JSON config file from the configs directory")
parser.add_argument('--mode', choices=['train', 'eval'], default='train', help="Specify whether to train or evaluate the model")
parser.add_argument('--gpu', type=int, default=-1, help="The GPU label number. By default, the code runs on CPU")
parser.add_argument('--seed', type=int, default=42)
args = parser.parse_args()
# Fields expected from the command line
self.config = os.path.join(PROJ_DIR, args.config)
self.mode = args.mode
self.gpu = args.gpu
self.seed = args.seed
else:
self.gpu = -1
self.mode = 'eval'
self.seed = 42
self.config = os.path.join(PROJ_DIR, config_path)
assert os.path.exists(self.config) and self.config.endswith('.json'), f'Bad config path: {self.config}'
# Read the parameters from the JSON file and skip comments
with open(self.config, 'r') as f:
params = ''.join([re.sub(r"//.*$", "", line, flags=re.M) for line in f])
arguments = json.loads(params, object_hook=lambda d: Namespace(**d))
# Must-have fields expected from the JSON config file
self.experiment = arguments.experiment
self.description = arguments.description
self.task = arguments.task
self.dataset = arguments.dataset
self.model = arguments.model
self.training = arguments.training
self.evaluation = arguments.evaluation
# Checking that the JSON contains at least the fixed fields
assert all([hasattr(self.dataset, name) for name in {'train', 'dev', 'test'}])
assert all([hasattr(self.model, name) for name in {'name'}])
assert all([hasattr(self.training, name) for name in {'epochs', 'batch_size', 'optimizer', 'lr_scheduler', 'l2', 'clip_grad'}])
assert all([hasattr(self.training.optimizer, name) for name in {'name', 'lr'}])
assert all([hasattr(self.training.lr_scheduler, name) for name in {'name'}])
assert all([hasattr(self.evaluation, name) for name in {'batch_size'}])
self._format_datapaths()
self._add_extra_fields()
self._add_transfer_learning_fields(arguments)
def _add_transfer_learning_fields(self, args):
if hasattr(args, "pretrained_config"):
self.pretrained_config = args.pretrained_config
if hasattr(args, "transfer_mode"):
self.transfer_mode = args.transfer_mode
if hasattr(args, "restore_model"):
self.restore_model = args.restore_model
def _format_datapaths(self):
self.dataset.train = os.path.join(PROJ_DIR, 'data', self.dataset.train)
self.dataset.dev = os.path.join(PROJ_DIR, 'data', self.dataset.dev)
self.dataset.test = os.path.join(PROJ_DIR, 'data', self.dataset.test)
def _add_extra_fields(self):
self.checkpoints = os.path.join(PROJ_DIR, 'checkpoints', self.experiment)
self.figures = os.path.join(PROJ_DIR, 'reports/figures', self.experiment)
self.history = os.path.join(PROJ_DIR, 'reports/history', self.experiment)
self.predictions = os.path.join(PROJ_DIR, 'reports/predictions', self.experiment)
self.attentions = os.path.join(PROJ_DIR, 'reports/attentions', self.experiment)
def main():
args = Arguments()
    # Seed every RNG up front so CPU-only runs are reproducible as well.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available() and args.gpu >= 0:
        torch.cuda.manual_seed_all(args.seed)
        torch.backends.cudnn.deterministic = True
        args.device = torch.device(f"cuda:{args.gpu}")
        torch.cuda.set_device(args.device)
    else:
        args.device = torch.device("cpu")
print("[LOG] {}".format('=' * 40))
print("[LOG] {: >15}: '{}'".format("Experiment ID", args.experiment))
print("[LOG] {: >15}: '{}'".format("Description", args.description))
print("[LOG] {: >15}: '{}'".format("Task", args.task.upper()))
for key, val in vars(args.dataset).items():
print("[LOG] {: >15}: {}".format(key, val))
print("[LOG] {: >15}: '{}'".format("Modeling", vars(args.model)))
print("[LOG] {: >15}: '{}'".format("Training", vars(args.training)))
print("[LOG] {: >15}: '{}'".format("Evaluation", vars(args.evaluation)))
print("[LOG] {: >15}: '{}'".format("Device", args.device))
print("[LOG] {}".format('=' * 40))
if args.task.startswith('lid'): experiment_lid.main(args)
elif args.task.startswith('ner'): experiment_ner.main(args)
elif args.task.startswith('pos'): experiment_pos.main(args)
else: raise Exception('Unexpected task: {}'.format(args.task))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1696994 | <gh_stars>1-10
import atexit
import unittest
from pact import Consumer, Provider
from consumer import user
pact = Consumer('Consumer').has_pact_with(Provider('Provider'))
pact.start_service()
atexit.register(pact.stop_service)
class ContractTest(unittest.TestCase):
def test_first(self):
expected = {'hello': 'verification_glasses'}
(pact
.given('')
.upon_receiving('a request for users with input verification_glasses')
.with_request('get', '/users/verification_glasses')
.will_respond_with(200, body=expected))
gateway = 'http://localhost:1234'
with pact:
result = user(gateway, 'verification_glasses')
self.assertEqual(result, expected)
| StarcoderdataPython |
116030 | <reponame>smpenna3/traffic<filename>main.py<gh_stars>0
from flask import Flask, render_template, request, Response
import logging
import traceback
import datetime as dt
import json
from traffic_lights import TrafficLights
# Setup logging
logger = logging.getLogger('mainLog')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log.log')
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(lineno)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
# Get flask logger
flaskLogger = logging.getLogger('werkzeug')
flaskLogger.setLevel(logging.WARNING)
flaskLogger.addHandler(fh)
flaskLogger.addHandler(sh)
# Set up the flask app
app = Flask(__name__)
# Set up instance of class
lights = TrafficLights()
############# SETUP FLASK ROUTES ######################
@app.route('/red/<state>')
def red(state):
lights.set_red(state)
return Response(status=200)
@app.route('/red')
def red_get():
return str(lights.get_red())
@app.route('/green/<state>')
def green(state):
lights.set_green(state)
return Response(status=200)
@app.route('/green')
def green_get():
return str(lights.get_green())
@app.route('/yellow/<state>')
def yellow(state):
lights.set_yellow(state)
return Response(status=200)
@app.route('/yellow')
def yellow_get():
return str(lights.get_yellow())
@app.route('/', methods=['POST', 'GET'])
def home():
if request.method == 'POST':
if 'on' in request.form:
lights.all_on()
if 'off' in request.form:
lights.off()
if 'red' in request.form:
lights.set_red()
if 'yellow' in request.form:
lights.set_yellow()
if 'green' in request.form:
lights.set_green()
redStatus = 'red' if lights.get_red() else 'black'
yellowStatus = 'yellow' if lights.get_yellow() else 'black'
greenStatus = 'green' if lights.get_green() else 'black'
return render_template('index.html', red_status=redStatus, yellow_status=yellowStatus, green_status=greenStatus)
# Define simple method to turn on the lights
@app.route('/on', methods=['POST'])
def on():
if request.method == 'POST':
lights.all_on()
return Response(status=200)
else:
        return Response(status=405)  # method not allowed
# Define a simple method to turn off the lights
@app.route('/off', methods=['POST'])
def off():
if request.method == 'POST':
lights.off()
return Response(status=200)
else:
        return Response(status=405)  # method not allowed
if __name__ == '__main__':
app.run(threaded=True, debug=True, host='0.0.0.0') | StarcoderdataPython |
1745762 | import torch
from pprint import pprint
import config
from utils.manager import PathManager
from model import *
def buildModel(path_manager: PathManager,
task_config=None,
model_params: config.ParamsConfig = None,
loss_func=None,
data_source=None,):
if model_params is None:
model_params = config.params
if task_config is None:
task_config = config.task
try:
model = ModelSwitch[model_params.ModelName](path_manager, model_params, task_config, loss_func, data_source)\
.cuda()
except KeyError:
raise ValueError("[ModelBuilder] No matched model implementation for '%s'"
% model_params.ModelName)
    # Load any pre-trained parameters specified by earlier versions
if len(task_config.PreloadStateDictVersions) > 0:
remained_model_keys = [n for n,_ in model.named_parameters()]
unexpected_keys = []
for version in task_config.PreloadStateDictVersions:
pm = PathManager(dataset=task_config.Dataset,
version=version,
model_name=model_params.ModelName)
state_dict = torch.load(pm.model())
load_result = model.load_state_dict(state_dict, strict=False)
for k in state_dict.keys():
if k not in load_result.unexpected_keys and k in remained_model_keys:
remained_model_keys.remove(k)
unexpected_keys.extend(load_result.unexpected_keys)
if len(remained_model_keys) > 0:
print(f'[buildModel] Preloading, unloaded keys:')
pprint(remained_model_keys)
if len(unexpected_keys) > 0:
print(f'[buildModel] Preloading, unexpected keys:')
pprint(unexpected_keys)
return model
def _ProtoNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return ProtoNet(model_params, path_manager, loss_func, data_source)
def _NnNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return NnNet(model_params, path_manager, loss_func, data_source)
def _HAPNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return HAPNet(model_params, path_manager, loss_func, data_source, task_params.Episode.k)
def _SIMPLE(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return SIMPLE(model_params, path_manager, loss_func, data_source)
def _IMP(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return IMP(model_params, path_manager, loss_func, data_source)
def _PostProtoNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return PostProtoNet(model_params, path_manager, loss_func, data_source)
def _MLossProtoNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return MLossProtoNet(model_params, path_manager, loss_func, data_source)
def _MLossSIMPLE(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return MLossSIMPLE(model_params, path_manager, loss_func, data_source)
def _MLossIMP(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return MLossIMP(model_params, path_manager, loss_func, data_source)
def _FEAT(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return FEAT(model_params, path_manager, loss_func, data_source)
def _ConvProtoNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return ConvProtoNet(model_params, path_manager, loss_func, data_source, task_params.Episode.k)
def _InductionNet(path_manager: PathManager,
model_params: config.ParamsConfig,
task_params: config.TaskConfig,
loss_func,
data_source):
return InductionNet(model_params, path_manager, loss_func, data_source)
ModelSwitch = {
'ProtoNet': _ProtoNet,
'NnNet': _NnNet,
'HAPNet': _HAPNet,
'SIMPLE': _SIMPLE,
'IMP': _IMP,
'FEAT': _FEAT,
'ConvProtoNet': _ConvProtoNet,
'InductionNet': _InductionNet,
'PostProtoNet': _PostProtoNet,
'MLossProtoNet': _MLossProtoNet,
"MLossSIMPLE": _MLossSIMPLE,
'MLossIMP': _MLossIMP,
}
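

# A hypothetical lookup mirroring how buildModel dispatches on ModelName:
#
#     factory = ModelSwitch['ProtoNet']
#     model = factory(path_manager, model_params, task_config, loss_func, data_source)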
| StarcoderdataPython |
3215461 | <reponame>Learn-code-strategies/DEXBot<gh_stars>0
"""
A module to provide an interactive text-based tool for dexbot configuration
The result is dexbot can be run without having to hand-edit config files.
If systemd is detected it will offer to install a user service unit (under ~/.local/share/systemd)
This requires a per-user systemd process to be running
Requires the 'whiptail' tool for text-based configuration (so UNIX only)
if not available, falls back to a line-based configurator ("NoWhiptail")
Note there is some common cross-UI configuration stuff: look in base.py
It's expected GUI/web interfaces will be re-implementing code in this file, but they should
understand the common code so worker strategy writers can define their configuration once
for each strategy class.
"""
import importlib
import pathlib
import os
import os.path
import sys
import re
import subprocess
from dexbot.whiptail import get_whiptail
from dexbot.strategies.base import StrategyBase
import dexbot.helper
STRATEGIES = [
{'tag': 'relative',
'class': 'dexbot.strategies.relative_orders',
'name': 'Relative Orders'},
{'tag': 'stagger',
'class': 'dexbot.strategies.staggered_orders',
'name': 'Staggered Orders'}]
tags_so_far = {'stagger', 'relative'}
for desc, module in dexbot.helper.find_external_strategies():
tag = desc.split()[0].lower()
# make sure tag is unique
i = 1
while tag in tags_so_far:
tag = tag+str(i)
i += 1
tags_so_far.add(tag)
STRATEGIES.append({'tag': tag, 'class': module, 'name': desc})
SYSTEMD_SERVICE_NAME = os.path.expanduser(
"~/.local/share/systemd/user/dexbot.service")
SYSTEMD_SERVICE_FILE = """
[Unit]
Description=Dexbot
[Service]
Type=notify
WorkingDirectory={homedir}
ExecStart={exe} --systemd run
TimeoutSec=20m
Environment=PYTHONUNBUFFERED=true
Environment=UNLOCK={passwd}
[Install]
WantedBy=default.target
"""
def select_choice(current, choices):
""" For the radiolist, get us a list with the current value selected """
return [(tag, text, (current == tag and "ON") or "OFF")
for tag, text in choices]
def process_config_element(elem, whiptail, config):
""" Process an item of configuration metadata display a widget as appropriate
d: the Dialog object
config: the config dictionary for this worker
"""
if elem.description:
title = '{} - {}'.format(elem.title, elem.description)
else:
title = elem.title
if elem.type == "string":
txt = whiptail.prompt(title, config.get(elem.key, elem.default))
if elem.extra:
while not re.match(elem.extra, txt):
whiptail.alert("The value is not valid")
txt = whiptail.prompt(
title, config.get(
elem.key, elem.default))
config[elem.key] = txt
if elem.type == "bool":
value = config.get(elem.key, elem.default)
value = 'yes' if value else 'no'
config[elem.key] = whiptail.confirm(title, value)
if elem.type in ("float", "int"):
while True:
if elem.type == 'int':
template = '{}'
else:
template = '{:.8f}'
txt = whiptail.prompt(title, template.format(config.get(elem.key, elem.default)))
try:
if elem.type == "int":
val = int(txt)
else:
val = float(txt)
if val < elem.extra[0]:
whiptail.alert("The value is too low")
elif elem.extra[1] and val > elem.extra[1]:
whiptail.alert("the value is too high")
else:
break
except ValueError:
whiptail.alert("Not a valid value")
config[elem.key] = val
if elem.type == "choice":
config[elem.key] = whiptail.radiolist(title, select_choice(
config.get(elem.key, elem.default), elem.extra))
def dexbot_service_running():
""" Return True if dexbot service is running
"""
cmd = 'systemctl --user status dexbot'
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in output.stdout.readlines():
if b'Active:' in line and b'(running)' in line:
return True
return False
def setup_systemd(whiptail, config):
if not os.path.exists("/etc/systemd"):
return # No working systemd
if not whiptail.confirm(
"Do you want to run dexbot as a background (daemon) process?"):
config['systemd_status'] = 'disabled'
return
redo_setup = False
if os.path.exists(SYSTEMD_SERVICE_NAME):
redo_setup = whiptail.confirm('Redo systemd setup?', 'no')
if not os.path.exists(SYSTEMD_SERVICE_NAME) or redo_setup:
path = '~/.local/share/systemd/user'
path = os.path.expanduser(path)
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
        password = whiptail.prompt(
"The wallet password\n"
"NOTE: this will be saved on disc so the worker can run unattended. "
"This means anyone with access to this computer's files can spend all your money",
password=True)
        # Because this file holds the password, keep its permissions restrictive
fd = os.open(SYSTEMD_SERVICE_NAME, os.O_WRONLY | os.O_CREAT, 0o600)
with open(fd, "w") as fp:
fp.write(
SYSTEMD_SERVICE_FILE.format(
exe=sys.argv[0],
passwd=password,
homedir=os.path.expanduser("~")))
# The dexbot service file was edited, reload the daemon configs
os.system('systemctl --user daemon-reload')
# Signal cli.py to set the unit up after writing config file
config['systemd_status'] = 'enabled'
def get_strategy_tag(strategy_class):
for strategy in STRATEGIES:
if strategy_class == strategy['class']:
return strategy['tag']
return None
def configure_worker(whiptail, worker_config):
# By default always editing
editing = True
if not worker_config:
editing = False
default_strategy = worker_config.get('module', 'dexbot.strategies.relative_orders')
strategy_list = []
for strategy in STRATEGIES:
if default_strategy == strategy['class']:
default_strategy = strategy['tag']
# Add strategy tag and name pairs to a list
strategy_list.append([strategy['tag'], strategy['name']])
# Strategy selection
worker_config['module'] = whiptail.radiolist(
"Choose a worker strategy",
select_choice(default_strategy, strategy_list)
)
for strategy in STRATEGIES:
if strategy['tag'] == worker_config['module']:
worker_config['module'] = strategy['class']
# Import the strategy class but we don't __init__ it here
strategy_class = getattr(
importlib.import_module(worker_config["module"]),
'Strategy'
)
# Check if strategy has changed and editing existing worker
if editing and default_strategy != get_strategy_tag(worker_config['module']):
new_worker_config = {}
# If strategy has changed, create new config where base elements stay the same
for config_item in StrategyBase.configure():
key = config_item[0]
new_worker_config[key] = worker_config[key]
# Add module separately to the config
new_worker_config['module'] = worker_config['module']
worker_config = new_worker_config
# Use class metadata for per-worker configuration
config_elems = strategy_class.configure()
if config_elems:
# Strategy options
for elem in config_elems:
process_config_element(elem, whiptail, worker_config)
else:
whiptail.alert(
"This worker type does not have configuration information. "
"You will have to check the worker code and add configuration values to config.yml if required")
return worker_config
def configure_dexbot(config, ctx):
whiptail = get_whiptail('DEXBot configure')
workers = config.get('workers', {})
if not workers:
while True:
txt = whiptail.prompt("Your name for the worker")
config['workers'] = {txt: configure_worker(whiptail, {})}
if not whiptail.confirm("Set up another worker?\n(DEXBot can run multiple workers in one instance)"):
break
setup_systemd(whiptail, config)
else:
bitshares_instance = ctx.bitshares
action = whiptail.menu(
"You have an existing configuration.\nSelect an action:",
[('NEW', 'Create a new worker'),
('DEL', 'Delete a worker'),
('EDIT', 'Edit a worker'),
('CONF', 'Redo general config')])
if action == 'EDIT':
worker_name = whiptail.menu("Select worker to edit", [(index, index) for index in workers])
config['workers'][worker_name] = configure_worker(whiptail, config['workers'][worker_name])
strategy = StrategyBase(worker_name, bitshares_instance=bitshares_instance, config=config)
strategy.clear_all_worker_data()
elif action == 'DEL':
worker_name = whiptail.menu("Select worker to delete", [(index, index) for index in workers])
del config['workers'][worker_name]
strategy = StrategyBase(worker_name, bitshares_instance=bitshares_instance, config=config)
strategy.clear_all_worker_data()
elif action == 'NEW':
txt = whiptail.prompt("Your name for the new worker")
config['workers'][txt] = configure_worker(whiptail, {})
elif action == 'CONF':
choice = whiptail.node_radiolist(
msg="Choose node",
items=select_choice(config['node'][0], [(index, index) for index in config['node']])
)
# Move selected node as first item in the config file's node list
config['node'].remove(choice)
config['node'].insert(0, choice)
setup_systemd(whiptail, config)
whiptail.clear()
return config
| StarcoderdataPython |
3224222 | <filename>api/tests/integration/tests/basic/radicals.py
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
print("****** Basic ********")
m = indigo.loadMolecule("C1C=CC=C1")
print(m.smiles())
a = m.getAtom(0)
print(a.radicalElectrons())
print(a.radical())
rad = [Indigo.SINGLET, Indigo.DOUBLET, Indigo.TRIPLET]
for r in rad:
a.setRadical(r)
print(a.radicalElectrons())
print(a.radical() == r)
print(m.smiles())
print("****** SDF ********")
radicalMap = {
0: "None",
Indigo.SINGLET: "Singlet",
Indigo.DOUBLET: "Doublet",
Indigo.TRIPLET: "Triplet",
}
def testRadical(m):
print(m.smiles())
for a in m.iterateAtoms():
print(
" %d: %s with %d electrons"
% (a.index(), radicalMap[a.radical()], a.radicalElectrons())
)
for m in indigo.iterateSDFile(joinPathPy("molecules/radicals.sdf", __file__)):
testRadical(m)
m2 = indigo.loadMolecule(m.molfile())
testRadical(m2)
m3 = indigo.loadMolecule(m.smiles())
testRadical(m3)
| StarcoderdataPython |
127422 | <reponame>F1ashhimself/ui-map-parser<filename>tests/test_parser.py<gh_stars>0
# -*- coding: utf-8 -*-
__author__ = '<EMAIL>'
import pytest
from hamcrest import assert_that, equal_to, raises
from ui_map_parser import UIMapException
def test_simple_element(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('SimpleELEMENT')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div'))
def test_element_with_different_cased_properties(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('ElementWithDifferentCasedProperties')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div'))
def test_element_without_type(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('ElementWithoutType')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div'))
def test_element_with_parent_in_common(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('Foo.ElementWithParent')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div/span'))
def test_element_with_parent_not_in_common(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('ElementWithParent')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('/div[@id="some_id"]'))
def test_element_with_template(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('ElementWithTemplate', template={'id_name': 'some_id'})
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div[@id="some_id"]'))
def test_element_with_template_in_parent(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('Foo.ElementWithTemplateInParent',
template={'id_name': 'some_id'})
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div[@id="some_id"]/span'))
@pytest.mark.parametrize('ui_map_parser', ['en'], indirect=True)
def test_element_with_different_language(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('ElementWithSelectorForEnLanguage')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('//div[text()="some text"]'))
def test_file_not_found(ui_map_parser):
file_name = 'UnexistingFile'
assert_that(lambda: ui_map_parser.parse_element(f'{file_name}.UnexistingElement'),
raises(UIMapException, f'File "{file_name.lower()}" was not found.'))
def test_element_not_found(ui_map_parser):
element_name = 'UnexistingElement'
assert_that(lambda: ui_map_parser.parse_element(element_name),
raises(UIMapException, f'Element "{element_name}" was not found.'))
def test_element_with_parent_but_different_types(ui_map_parser):
element_name = 'Foo.ElementWithParentDifferentTypes'
assert_that(lambda: ui_map_parser.parse_element(element_name),
raises(UIMapException,
f'"{element_name}" element and "SimpleElement" element have different element '
f'types.'))
def test_element_with_different_register(ui_map_parser):
selector_type, selector = ui_map_parser.parse_element('bAr.ElementWithDifferentREgister')
assert_that(selector_type, equal_to('xpath'))
assert_that(selector, equal_to('/div/span'))
| StarcoderdataPython |
3355599 | # -*- coding: utf-8 -*-
from decimal import Decimal
def parse_coverage_report(string):
lines = string.splitlines()[1:] # Skip cover mode def.
statements = len(lines)
    covered = sum(1 for line in lines if line.split()[-1] != '0')
return float(round(Decimal(float(covered) / float(statements)) * 100, 2))
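

# Illustrative Go coverage report (made-up paths, not from a real run): with
# one of the two statements covered, the function returns 50.0.
#
#     mode: set
#     pkg/a.go:3.2,5.3 2 1
#     pkg/a.go:8.2,9.3 1 0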
| StarcoderdataPython |
3275177 | <filename>jams/esat.py
#!/usr/bin/env python
"""
esat : Saturation vapour pressure of water and ice.
This module was written by <NAME> while at Department of
Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany, and continued while at Institut
National de Recherche pour l'Agriculture, l'Alimentation et
l'Environnement (INRAE), Nancy, France.
Copyright (c) 2012-2020 <NAME> - mc (at) macu (dot) de
Released under the MIT License; see LICENSE file for details.
* Written Jan 2012 by <NAME> (mc (at) macu (dot) de)
* Ported to Python 3, Feb 2013, <NAME>
* Changed handling of masked arrays, Oct 2013, <NAME>
* Assert T>0, Apr 2014, <NAME>
* Using numpy docstring format, May 2020, <NAME>
.. moduleauthor:: <NAME>
The following functions are provided
.. autosummary::
esat
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['esat']
def esat(T, liquid=False, formula='GoffGratch'):
"""
Calculates the saturation vapour pressure of water and/or ice.
For temperatures above (and equal) 0 degree C (273.15 K),
the vapour pressure over liquid water is calculated.
For temperatures below 0 degree C, the vapour pressure over ice is calculated.
The optional parameter liquid=True changes the calculation to vapour pressure
over liquid water over the entire temperature range.
Parameters
----------
T : float or array_like
Temperature [K]
liquid : bool, optional
If True, use liquid formula for all temperatures.
formula : str, optional
Name of reference to use for calculations, case-insensitive (default: GoffGratch).
Note that several formulations do not provide a vapour pressure formulation over ice
        and Marti and Mauersberger do not provide a formula over liquid: GoffGratch is used in these cases.
GoffGratch: Smithsonian Tables, 1984; after Goff and Gratch, 1946 (default)
MartiMauersberger: Marti and Mauersberger, 1993
MagnusTeten: Murray, 1967
Buck_original: Buck, 1981
Buck: Buck Research Manual, 1996
WMO: Goff, 1957; WMO 1988, 2000
Wexler: Wexler, 1977
Sonntag: Sonntag, 1994
Bolton: Bolton, 1980
Fukuta: <NAME>. and <NAME>, 2003
HylandWexler: Hyland and Wexler, 1983
IAPWS: Wagner and Pruss, 2002
MurphyKoop: Murphy and Koop, 2005
Returns
-------
float or array_like
Saturation water pressure at temperature T in Pascal [Pa].
Notes
-----
From <NAME>: http://cires.colorado.edu/~voemel/vp.html
Referred literature cited in code.
Examples
--------
>>> print('{:.3f}'.format(esat(293.15)))
2335.847
>>> print('{:.3f}'.format(esat(253.15)))
103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15])))
2335.847 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='GoffGratch')))
2335.847 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='MartiMauersberger')))
2335.847 103.650
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='MagnusTeten')))
2335.201 102.771
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='buck')))
2338.340 103.286
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='Buck_original')))
2337.282 103.267
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='wmo')))
2337.080 103.153
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='WEXLER')))
2323.254 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='Sonntag')))
2339.249 103.249
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='Bolton')))
2336.947 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='Fukuta')))
2335.847 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='HylandWexler')))
2338.804 103.260
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='IAPWS')))
2339.194 103.074
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='MurphyKoop')))
2339.399 103.252
>>> print('{:.3f} {:.3f}'.format(*esat(np.array([293.15,253.15]), liquid=True)))
2335.847 125.292
>>> print('{:.3f} {:.3f}'.format(*esat([293.15,253.15],formula='Fukuta', liquid=True)))
2335.847 125.079
>>> print('{:.3f} {:.3f}'.format(*esat(np.array([293.15,393.15]))))
esat.py: UserWarning: T>373.15 K - something might be wrong with T.
2335.847 198473.378
>>> print('{:.3f} {:.3f}'.format(*esat(np.array([293.15,93.15]))))
esat.py: UserWarning: T<100 - T probably given in Celsius instead of Kelvin.
2335.847 0.000
>>> out = esat(np.ma.array([253.15,-9999.], mask=[False,True]))
>>> print('{:.3f} {:.3f}'.format(*out.filled(-9999.)))
103.074 -9999.000
History
-------
Written, <NAME>, Jan 2012
Modified, <NAME>, Feb 2013 - ported to Python 3
<NAME>, Oct 2013 - changed masked array handling
<NAME>, Apr 2014 - assert
<NAME>, May 2020 - numpy docstring format
"""
#
# Constants
    T0 = 273.15 # Celsius <-> Kelvin [K]
    knownforms = (['Buck', 'Buck_original', 'Bolton', 'Fukuta', 'GoffGratch', 'HylandWexler',
                   'IAPWS', 'MagnusTeten', 'MartiMauersberger', 'MurphyKoop',
                   'Sonntag', 'Wexler', 'WMO'])
lknown = [i.lower() for i in knownforms]
#
# Check input
T = np.ma.array(T)
    assert not np.ma.any(T <= 0.), 'T<=0 - T probably given in Celsius instead of Kelvin.'
if np.ma.any(T < 100.):
print("esat.py: UserWarning: T<100 - T probably given in Celsius instead of Kelvin.")
if np.ma.any(T > (T0+100.)):
print("esat.py: UserWarning: T>373.15 K - something might be wrong with T.")
form = formula.lower()
if form not in lknown:
        raise ValueError('Formula not known. Known formulas are: {}'.format(knownforms))
#
# Split input into masked arrays
    if liquid:
Tlim = 1e-3
else:
Tlim = T0
if T.size > 1:
isone = False
ii = np.ma.where(T>=Tlim)[0]
jj = np.ma.where(T<Tlim)[0]
if np.size(ii) > 0:
T_liq = T[ii]
if np.size(jj) > 0:
T_ice = T[jj]
else:
isone = True
if T>=Tlim:
ii = [0]
jj = []
T_liq = T
else:
ii = []
jj = [0]
T_ice = T
esat_out = T.copy() # to conserve mask
#
# Calc
#
# Liquid
if np.size(ii) > 0:
TC_liq = T_liq - T0
if form == 'buck':
'''Bucks vapour pressure formulation based on Tetens formula
Buck Research, Model CR-1A Hygrometer Operating Manual, Sep 2001'''
esat_liq = 6.1121 * np.ma.exp((18.678 - (TC_liq) / 234.5) * (TC_liq) / (257.14+TC_liq))
elif form == 'buck_original':
'''Bucks vapour pressure formulation based on Tetens formula
<NAME>., New equations for computing vapour pressure and enhancement factor,
J. Appl. Meteorol., 20, 1527-1532, 1981.'''
esat_liq = 6.1121 * np.ma.exp(17.502 * TC_liq / (240.97+TC_liq))
elif form == 'bolton':
'''<NAME>., The computation of equivalent potential temperature
Monthly Weather Report, 108, 1046-1053, 1980. equation (10)'''
esat_liq = 6.112 * np.ma.exp(17.67 * TC_liq / (TC_liq+243.5))
elif form == 'fukuta':
'''<NAME>. and <NAME>, Vapour pressure measurement of supercooled water
J. Atmos. Sci., 60, 1871-1875, 2003.
This paper does not give a vapour pressure formulation, but rather a correction over the Smithsonian Tables.
Thus calculate the table value first, then use the correction to get to the measured value.
This is done only for -39<TC<0.'''
Ts = 373.16 # steam point temperature in K
ews = 1013.246 # saturation pressure at steam point temperature, normal atmosphere
esat_liq = (10.**(-7.90298*(Ts/T_liq-1.) + 5.02808 * np.ma.log10(Ts/T_liq)
- 1.3816e-7 * (10.**(11.344*(1.-T_liq/Ts))-1.)
+ 8.1328e-3 * (10.**(-3.49149*(Ts/T_liq-1))-1.) + np.ma.log10(ews)))
mm = (TC_liq < 0.) & (TC_liq > -39.)
if np.ma.any(mm):
x = TC_liq + 19.
esat_liq = (np.where(mm, esat_liq * (0.9992 + 7.113e-4*x - 1.847e-4*x**2 + 1.189e-5*x**3
+ 1.130e-7*x**4 - 1.743e-8*x**5), esat_liq))
elif ((form == 'goffgratch') | (form == 'martimauersberger')):
'''Goff Gratch formulation, Smithsonian Meteorological Tables, 5th edition, p. 350, 1984
Original source: Goff and Gratch (1946), p. 107.'''
Ts = 373.16 # steam point temperature in K
ews = 1013.246 # saturation pressure at steam point temperature, normal atmosphere
esat_liq = (10.**(-7.90298*(Ts/T_liq-1.) + 5.02808 * np.ma.log10(Ts/T_liq)
- 1.3816e-7 * (10.**(11.344*(1.-T_liq/Ts))-1.)
+ 8.1328e-3 * (10.**(-3.49149*(Ts/T_liq-1.))-1.) + np.ma.log10(ews)))
elif form == 'hylandwexler':
'''<NAME>. and <NAME>, Formulations for the Thermodynamic Properties of the
saturated Phases of H2O from 173.15K to 473.15K, ASHRAE Trans, 89(2A), 500-519, 1983.'''
esat_liq = (np.ma.exp(- 0.58002206e4/T_liq + 0.13914993e1 - 0.48640239e-1*T_liq
+ 0.41764768e-4*T_liq**2 - 0.14452093e-7*T_liq**3
+ 0.65459673e1 * np.ma.log(T_liq)) / 100.)
elif form == 'iapws':
'''<NAME>. and <NAME> (2002)
The IAPWS formulation 1995 for the thermodynamic properties of ordinary water substance
for general and scientific use, J. Phys. Chem. Ref. Data, 31(2), 387-535.
This is the 'official' formulation from the International Association for the Properties of Water and Steam
The valid range of this formulation is 273.16 <= T <= 647.096 K and is based on the ITS90 temperature scale.'''
Tc = 647.096 # K : Temperature at the critical point
Pc = 22.064e4 # hPa : Vapour pressure at the critical point
nu = (1-T_liq/Tc)
a1 = -7.85951783
a2 = 1.84408259
a3 = -11.7866497
a4 = 22.6807411
a5 = -15.9618719
a6 = 1.80122502
esat_liq = Pc * np.ma.exp(Tc/T_liq * (a1*nu + a2*nu**1.5 + a3*nu**3 + a4*nu**3.5 + a5*nu**4 + a6*nu**7.5))
elif form == 'magnusteten':
'''<NAME>., On the computation of saturation vapour pressure, J. Appl. Meteorol., 6, 203-204, 1967.'''
esat_liq = 10.**(7.5*(TC_liq)/(TC_liq+237.5) + 0.7858)
elif form == 'murphykoop':
'''<NAME>, Review of the vapour pressure of ice and supercooled water for atmospheric applications
Q. J. R. Meteorol. Soc (2005), 131, pp. 1539-1565.'''
esat_liq = (np.exp(54.842763 - 6763.22 / T_liq - 4.210 * np.ma.log(T_liq) + 0.000367 * T_liq
+ np.tanh(0.0415 * (T_liq - 218.8)) * (53.878 - 1331.22 / T_liq -
9.44523 * np.ma.log(T_liq) + 0.014025 * T_liq)) / 100.)
elif form == 'sonntag':
'''<NAME>., Advancements in the field of hygrometry, Meteorol. Z., <NAME>., 3, 51-66, 1994.'''
esat_liq = (np.ma.exp( -6096.9385 * 1./T_liq + 16.635794 - 2.711193e-2 * T_liq
+ 1.673952e-5 * T_liq**2 + 2.433502 * np.ma.log(T_liq)))
elif form == 'wexler':
'''<NAME>., Vapour pressure formulation for ice
Journal of Research of the National Bureau of Standards-A. 81A, 5-20, 1977.'''
esat_liq = (np.ma.exp(- 2.9912729e3 * 1./T_liq**2 - 6.0170128e3 * 1./T_liq
+ 1.887643854e1 - 2.8354721e-2 * T_liq**1
+ 1.7838301e-5 * T_liq**2 - 8.4150417e-10 * T_liq**3
- 4.4412543e-13 * T_liq**4 + 2.858487 * np.ma.log(T_liq)) / 100.)
elif form == 'wmo':
'''Intended WMO formulation, originally published by Goff (1957)
incorrectly referenced by WMO technical regulations, WMO-NO 49, Vol I
General Meteorological Standards and Recommended Practices, App. A, 1988, Corrigendum Aug 2000.'''
Ts = 273.16 # steam point temperature in K
esat_liq = (10.**(10.79574*(1.-Ts/T_liq) - 5.02800 * np.ma.log10(T_liq/Ts)
+ 1.50475e-4 * (1.-10.**(-8.2969*(T_liq/Ts-1.)))
+ 0.42873e-3 * (10.**(+4.76955*(1.-Ts/T_liq))-1.)
+ 0.78614))
else:
raise ValueError("formulation not known: {:s}".format(formula))
esat_liq *= 100.
if isone:
esat_out = esat_liq
else:
esat_out[ii] = esat_liq
#
# Ice
if np.size(jj) > 0:
TC_ice = T_ice - T0
if form == 'buck':
'''Bucks vapour pressure formulation based on Tetens formula
Buck Research, Model CR-1A Hygrometer Operating Manual, Sep 2001'''
esat_ice = 6.1115 * np.exp((23.036 - TC_ice / 333.7) * TC_ice / (279.82+TC_ice))
elif form == 'buck_original':
'''Bucks vapour pressure formulation based on Tetens formula
<NAME>., New equations for computing vapour pressure and enhancement factor,
J. Appl. Meteorol., 20, 1527-1532, 1981.'''
esat_ice = 6.1115 * np.exp(22.452 * TC_ice / (272.55+TC_ice))
elif ((form == 'goffgratch') | (form == 'bolton') | (form == 'fukuta') | (form == 'iapws') | (form == 'wexler')):
'''Smithsonian Meteorological Tables, 5th edition, p. 350, 1984'''
ei0 = 6.1071 # mbar
Ts = 273.16 # freezing point in K
esat_ice = np.ma.exp(np.log(10.)*(-9.09718 * (Ts/T_ice-1.) - 3.56654 * np.ma.log10(Ts/T_ice)
+ 0.876793 * (1.-T_ice/Ts) + np.log10(ei0)))
elif (form == 'hylandwexler'):
'''<NAME>. and <NAME>, Formulations for the Thermodynamic Properties of
the saturated Phases of H2O from 173.15K to 473.15K, ASHRAE Trans, 89(2A), 500-519, 1983.'''
esat_ice = (np.exp(- 0.56745359E4 / T_ice + 0.63925247E1 - 0.96778430E-2 * T_ice
+ 0.62215701E-6 * T_ice**2. + 0.20747825E-8 * T_ice**3.
- 0.94840240E-12 * T_ice**4. + 0.41635019E1 * np.log(T_ice)) / 100.)
elif form == 'magnusteten':
            '''<NAME>., On the computation of saturation vapour pressure, J. Appl. Meteorol., 6, 203-204, 1967.'''
esat_ice = 10.**(9.5 * TC_ice/(265.5+TC_ice) + 0.7858)
elif form == 'martimauersberger':
'''<NAME>. and <NAME>, A survey and new measurements of ice vapour pressure
at temperatures between 170 and 250 K, GRL 20, 363-366, 1993.'''
esat_ice = 10.**(-2663.5/T_ice + 12.537) / 100.
elif form == 'murphykoop':
'''<NAME>, Review of the vapour pressure of ice and supercooled water
for atmospheric applications, <NAME>. Meteorol. Soc (2005), 131, pp. 1539-1565.'''
esat_ice = np.exp(9.550426 - 5723.265/T_ice + 3.53068 * np.log(T_ice) - 0.00728332 * T_ice) / 100.
elif form == 'sonntag':
'''<NAME>., Advancements in the field of hygrometry, Meteorol. Z., <NAME>., 3, 51-66, 1994.'''
esat_ice = (np.exp(- 6024.5282 * 1./T_ice + 24.721994 + 1.0613868E-2 * T_ice
- 1.3198825E-5 * T_ice**2 - 0.49382577 * np.log(T_ice)))
elif form == 'wmo':
'''WMO formulation, which is very similar to Goff Gratch
WMO technical regulations, WMO-NO 49, Vol I, General Meteorological Standards
and Recommended Practices, Aug 2000, App. A.'''
Ts = 273.16 # steam point temperature in K
esat_ice = (10.**(-9.09685 * (Ts/T_ice-1.) - 3.56654 * np.log10(Ts/T_ice)
+ 0.87682 * (1.-T_ice/Ts) + 0.78614))
else:
raise ValueError("formulation not known: {:s}".format(formula))
esat_ice *= 100.
if isone:
esat_out = esat_ice
else:
esat_out[jj] = esat_ice
#
# Finish
return esat_out
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
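    # Quick demonstration (added for illustration; not part of the original
    # module): saturation vapour pressure over liquid water at 20 degC with
    # the default Goff-Gratch formulation, matching the first doctest above.
    print('esat(293.15) = {:.3f} Pa'.format(esat(293.15)))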
| StarcoderdataPython |
125942 | <filename>venv/Lib/site-packages/formtools/__init__.py
__version__ = '2.0'
default_app_config = 'formtools.apps.FormToolsConfig'
| StarcoderdataPython |
3230575 | <reponame>GiannisVagionakis/metrics
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import roc_auc_score as sk_roc_auc_score
from tests.classification.inputs import _input_binary_prob
from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob
from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob
from tests.helpers import seed_all
from tests.helpers.testers import NUM_CLASSES, MetricTester
from torchmetrics.classification.auroc import AUROC
from torchmetrics.functional import auroc
from torchmetrics.utilities.imports import _TORCH_LOWER_1_6
seed_all(42)
def _sk_auroc_binary_prob(preds, target, num_classes, average='macro', max_fpr=None, multi_class='ovr'):
# todo: `multi_class` is unused
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return sk_roc_auc_score(y_true=sk_target, y_score=sk_preds, average=average, max_fpr=max_fpr)
def _sk_auroc_multiclass_prob(preds, target, num_classes, average='macro', max_fpr=None, multi_class='ovr'):
sk_preds = preds.reshape(-1, num_classes).numpy()
sk_target = target.view(-1).numpy()
return sk_roc_auc_score(
y_true=sk_target,
y_score=sk_preds,
average=average,
max_fpr=max_fpr,
multi_class=multi_class,
)
def _sk_auroc_multidim_multiclass_prob(preds, target, num_classes, average='macro', max_fpr=None, multi_class='ovr'):
sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
sk_target = target.view(-1).numpy()
return sk_roc_auc_score(
y_true=sk_target,
y_score=sk_preds,
average=average,
max_fpr=max_fpr,
multi_class=multi_class,
)
def _sk_auroc_multilabel_prob(preds, target, num_classes, average='macro', max_fpr=None, multi_class='ovr'):
sk_preds = preds.reshape(-1, num_classes).numpy()
sk_target = target.reshape(-1, num_classes).numpy()
return sk_roc_auc_score(
y_true=sk_target,
y_score=sk_preds,
average=average,
max_fpr=max_fpr,
multi_class=multi_class,
)
def _sk_auroc_multilabel_multidim_prob(preds, target, num_classes, average='macro', max_fpr=None, multi_class='ovr'):
sk_preds = preds.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
sk_target = target.transpose(0, 1).reshape(num_classes, -1).transpose(0, 1).numpy()
return sk_roc_auc_score(
y_true=sk_target,
y_score=sk_preds,
average=average,
max_fpr=max_fpr,
multi_class=multi_class,
)
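def _example_functional_auroc():
    """Illustrative sketch (not part of the original test suite): the
    functional ``auroc`` on a tiny binary input, the same code path exercised
    by ``_input_binary_prob`` above. Tensor values are made up; ``pos_label``
    is passed explicitly as an assumption about this torchmetrics version.
    """
    preds = torch.tensor([0.1, 0.4, 0.35, 0.8])
    target = torch.tensor([0, 0, 1, 1])
    # 3 of the 4 positive/negative pairs are ranked correctly -> 0.75
    return auroc(preds, target, pos_label=1)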
@pytest.mark.parametrize("average", ['macro', 'weighted', 'micro'])
@pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5])
@pytest.mark.parametrize(
"preds, target, sk_metric, num_classes",
[(_input_binary_prob.preds, _input_binary_prob.target, _sk_auroc_binary_prob, 1),
(_input_mcls_prob.preds, _input_mcls_prob.target, _sk_auroc_multiclass_prob, NUM_CLASSES),
(_input_mdmc_prob.preds, _input_mdmc_prob.target, _sk_auroc_multidim_multiclass_prob, NUM_CLASSES),
(_input_mlb_prob.preds, _input_mlb_prob.target, _sk_auroc_multilabel_prob, NUM_CLASSES),
(_input_mlmd_prob.preds, _input_mlmd_prob.target, _sk_auroc_multilabel_multidim_prob, NUM_CLASSES)]
)
class TestAUROC(MetricTester):
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_auroc(self, preds, target, sk_metric, num_classes, average, max_fpr, ddp, dist_sync_on_step):
        # max_fpr other than None is not supported for multiclass
        if max_fpr is not None and num_classes != 1:
            pytest.skip('max_fpr parameter not supported for multiclass or multilabel')
        # max_fpr only supported for torch v1.6 or higher
        if max_fpr is not None and _TORCH_LOWER_1_6:
            pytest.skip('requires torch v1.6 or higher to test max_fpr argument')
        # average='micro' only supported for multilabel
        if average == 'micro' and preds.ndim > 2 and preds.ndim == target.ndim + 1:
            pytest.skip('micro argument only supported for multilabel input')
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=AUROC,
sk_metric=partial(sk_metric, num_classes=num_classes, average=average, max_fpr=max_fpr),
dist_sync_on_step=dist_sync_on_step,
metric_args={
"num_classes": num_classes,
"average": average,
"max_fpr": max_fpr
},
)
def test_auroc_functional(self, preds, target, sk_metric, num_classes, average, max_fpr):
        # max_fpr other than None is not supported for multiclass
        if max_fpr is not None and num_classes != 1:
            pytest.skip('max_fpr parameter not supported for multiclass or multilabel')
        # max_fpr only supported for torch v1.6 or higher
        if max_fpr is not None and _TORCH_LOWER_1_6:
            pytest.skip('requires torch v1.6 or higher to test max_fpr argument')
        # average='micro' only supported for multilabel
        if average == 'micro' and preds.ndim > 2 and preds.ndim == target.ndim + 1:
            pytest.skip('micro argument only supported for multilabel input')
self.run_functional_metric_test(
preds,
target,
metric_functional=auroc,
sk_metric=partial(sk_metric, num_classes=num_classes, average=average, max_fpr=max_fpr),
metric_args={
"num_classes": num_classes,
"average": average,
"max_fpr": max_fpr
},
)
def test_auroc_differentiability(self, preds, target, sk_metric, num_classes, average, max_fpr):
        # max_fpr other than None is not supported for multiclass
        if max_fpr is not None and num_classes != 1:
            pytest.skip('max_fpr parameter not supported for multiclass or multilabel')
        # max_fpr only supported for torch v1.6 or higher
        if max_fpr is not None and _TORCH_LOWER_1_6:
            pytest.skip('requires torch v1.6 or higher to test max_fpr argument')
        # average='micro' only supported for multilabel
        if average == 'micro' and preds.ndim > 2 and preds.ndim == target.ndim + 1:
            pytest.skip('micro argument only supported for multilabel input')
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=AUROC,
metric_functional=auroc,
metric_args={
"num_classes": num_classes,
"average": average,
"max_fpr": max_fpr
}
)
def test_error_on_different_mode():
""" test that an error is raised if the user pass in data of
different modes (binary, multi-label, multi-class)
"""
metric = AUROC()
# pass in multi-class data
metric.update(torch.randn(10, 5).softmax(dim=-1), torch.randint(0, 5, (10, )))
with pytest.raises(ValueError, match=r"The mode of data.* should be constant.*"):
# pass in multi-label data
metric.update(torch.rand(10, 5), torch.randint(0, 2, (10, 5)))
def test_error_multiclass_no_num_classes():
with pytest.raises(
ValueError, match="Detected input to ``multiclass`` but you did not provide ``num_classes`` argument"
):
_ = auroc(torch.randn(20, 3).softmax(dim=-1), torch.randint(3, (20, )))
| StarcoderdataPython |
1712734 | # blender modules
import bpy
# addon modules
from . import gl_utils
from . import gpu_utils
from . import settings
from .. import version_utils
def draw_cube(half_size_x, half_size_y, half_size_z, color=None):
if version_utils.IS_28:
gpu_utils.draw_wire_cube(half_size_x, half_size_y, half_size_z, color)
else:
gl_utils.draw_wire_cube(half_size_x, half_size_y, half_size_z)
def draw_sphere(radius, num_segments, color=None):
if version_utils.IS_28:
gpu_utils.draw_wire_sphere(radius, num_segments, color)
else:
gl_utils.draw_wire_sphere(radius, num_segments)
def draw_cylinder(radius, half_height, num_segments, color=None):
if version_utils.IS_28:
gpu_utils.draw_wire_cylinder(radius, half_height, num_segments, color)
else:
gl_utils.draw_wire_cylinder(radius, half_height, num_segments)
def draw_cross(size, color=None):
if version_utils.IS_28:
gpu_utils.draw_cross(size, color)
else:
gl_utils.draw_cross(size)
def get_draw_joint_limits():
if version_utils.IS_28:
return gpu_utils.draw_joint_limits
else:
return gl_utils.draw_joint_limits
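def _example_draw_callback():
    # Illustrative sketch (not part of the original module; the RGBA tuple is
    # an assumption about the gpu_utils color format): the helpers above are
    # meant to run inside a 'POST_VIEW' draw handler such as overlay_view_3d()
    # below, where a GL/GPU drawing context is available.
    draw_cube(0.5, 0.5, 0.5, color=(0.0, 1.0, 0.0, 1.0))
    draw_cross(0.2)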
def overlay_view_3d():
    def try_draw(base_obj, obj):
        # Draw the object's own X-Ray overlay, if it provides one.
        if not hasattr(obj, 'xray'):
            return
        xray = obj.xray
        if hasattr(xray, 'ondraw_postview'):
            xray.ondraw_postview(base_obj, obj)
        if hasattr(obj, 'type'):
            if obj.type == 'ARMATURE':
                arm_data = obj.data.xray
                shapes = arm_data.display_bone_shapes
                centers = arm_data.display_bone_mass_centers
                limits = arm_data.display_bone_limits
                # Recurse into bones only when at least one bone overlay
                # (shapes, mass centers or joint limits) is enabled.
                if shapes or centers or limits:
                    for bone in obj.data.bones:
                        try_draw(base_obj, bone)
for obj in bpy.data.objects:
try_draw(obj, obj)
def register():
overlay_view_3d.__handle = bpy.types.SpaceView3D.draw_handler_add(
overlay_view_3d,
(),
'WINDOW',
'POST_VIEW'
)
def unregister():
bpy.types.SpaceView3D.draw_handler_remove(
overlay_view_3d.__handle,
'WINDOW'
)
| StarcoderdataPython |
1765486 | <reponame>watacool/lyrics_analysis<gh_stars>0
# coding: utf-8
# python 2.7
import os
import argparse
import pandas as pd
from download_lyrics import make_dataset
from janome.tokenizer import Tokenizer # $ pip install janome
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--init_load", default=False, action='store_true')
parser.add_argument("--dataset", default="./data/dataset.csv", type=str)
parser.add_argument("--wordlist", default="./data/word_list.txt", type=str)
args = parser.parse_args()
return args
def morphological_analysis_janome(text):
    t = Tokenizer()
    tokens = t.tokenize(text)  # input must be unicode; returns a token list
    return tokens
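def _example_token_fields():
    # Illustrative sketch (not part of the original script): the janome token
    # attributes used below. part_of_speech is a comma-separated string whose
    # first field is the word class; base_form is the dictionary form.
    for token in morphological_analysis_janome(u'some unicode text'):
        print token.part_of_speech.split(',')[0], token.base_form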
def morphological(args=None):
# make and load dataset
print args.init_load
if args.init_load:
print "# make dataset..."
make_dataset()
print "# load dataset {}...".format(args.dataset)
df = pd.read_csv(args.dataset, encoding='shift-jis')
word_list = ""
for index, row in df.iterrows():
print "# [{}] {}".format(index+1, row.title.encode('utf-8'))
mor_list = morphological_analysis_janome(row.lyrics)
for mor in mor_list:
'''
reference
http://ailaby.com/janome/
'''
            # The first field of part_of_speech is the word class; keep only
            # nouns (名詞) and verbs (動詞).
            type = mor.part_of_speech.split(',')[0].encode('utf-8')
            if type == u'名詞'.encode('utf-8') or type == u'動詞'.encode('utf-8'):
word = mor.base_form.encode('utf-8')
if word != u','.encode('utf-8'):
print "[{}]: {}".format(word, type)
word_list = word_list + " " + word
word_list = word_list + "\n"
with open(args.wordlist, mode="w") as f:
f.write(word_list)
return word_list
def main():
args = get_args()
word_list = morphological(args)
print word_list
if __name__ == '__main__':
main()
| StarcoderdataPython |
3335362 | <reponame>nimzco/Environment
# -*- coding: utf-8 -*-
import os
import platform
import fnmatch
from re import match, sub
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
#
# Monkey patch `sublime.Region` so it can be iterable:
sublime.Region.totuple = lambda self: (self.a, self.b)
sublime.Region.__iter__ = lambda self: self.totuple().__iter__()
PLUGIN_NAME = 'JsPrettier'
PLUGIN_PATH = os.path.join(sublime.packages_path(),
os.path.dirname(os.path.realpath(__file__)))
PLUGIN_CMD_NAME = 'js_prettier'
PROJECT_SETTINGS_KEY = PLUGIN_CMD_NAME
SETTINGS_FILE = '{0}.sublime-settings'.format(PLUGIN_NAME)
PRETTIER_OPTIONS_KEY = 'prettier_options'
PRETTIER_OPTION_CLI_MAP = [
{
'option': 'printWidth',
'cli': '--print-width',
'default': '80'
},
{
'option': 'singleQuote',
'cli': '--single-quote',
'default': 'false'
},
{
'option': 'trailingComma',
'cli': '--trailing-comma',
'default': 'none'
},
{
'option': 'bracketSpacing',
'cli': '--bracket-spacing',
'default': 'true'
},
{
'option': 'jsxBracketSameLine',
'cli': '--jsx-bracket-same-line',
'default': 'false'
},
{
'option': 'parser',
'cli': '--parser',
'default': 'babylon'
},
{
'option': 'semi',
'cli': '--semi',
'default': 'true'
}
]
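# Illustrative sketch (not part of the original plug-in): how entries of the
# map above are rendered into argv by parse_prettier_options() further down.
# Boolean option values collapse into a single "--flag=value" item, while
# everything else becomes a flag followed by its value:
#
#   {'option': 'singleQuote', 'cli': '--single-quote', 'default': 'false'}
#       -> ['--single-quote=true']          (setting is "true")
#   {'option': 'trailingComma', 'cli': '--trailing-comma', 'default': 'none'}
#       -> ['--trailing-comma', 'es5']      (setting is "es5")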
ALLOWED_FILE_EXTENSIONS = [
'js',
'jsx',
'json',
'graphql',
'ts',
'tsx',
'css',
'scss',
'less'
]
IS_SUBLIME_TEXT_LATEST = int(sublime.version()) >= 3000
class JsPrettierCommand(sublime_plugin.TextCommand):
_error_message = None
@property
def debug(self):
return self.get_setting('debug', False)
@property
def has_error(self):
if not self._error_message:
return False
return True
@property
def error_message(self):
return self._error_message
@error_message.setter
def error_message(self, message=None):
self._error_message = message
@property
def proc_env(self):
env = None
if not self.is_windows():
env = os.environ.copy()
usr_path = ':/usr/local/bin'
if not self.env_path_exists(usr_path) \
and self.path_exists(usr_path):
env['PATH'] += usr_path
return env
@property
def prettier_cli_path(self):
"""The prettier cli path.
When the `prettier_cli_path` setting is empty (""),
the path is resolved by searching locations in the following order,
returning the first match of the prettier cli path...
- Locally installed prettier, relative to a Sublime Text Project
file's root directory, e.g.: `node_modules/.bin/prettier'.
- User's $HOME/node_modules directory.
- Look in the JsPrettier Sublime Text plug-in directory for
`node_modules/.bin/prettier`.
- Finally, check if prettier is installed globally,
e.g.: `yarn global add prettier`
or: `npm install -g prettier`
:return: The prettier cli path.
"""
user_prettier_path = self.get_setting('prettier_cli_path', '')
project_path = self.get_active_project_path()
if self.is_str_none_or_empty(user_prettier_path):
global_prettier_path = self.which('prettier')
project_prettier_path = os.path.join(
project_path, 'node_modules', '.bin', 'prettier')
plugin_prettier_path = os.path.join(
PLUGIN_PATH, 'node_modules', '.bin', 'prettier')
if os.path.exists(project_prettier_path):
return project_prettier_path
if os.path.exists(plugin_prettier_path):
return plugin_prettier_path
return global_prettier_path
# handle cases when the user specifies a prettier cli path that is
# relative to the working file or project:
if not os.path.isabs(user_prettier_path):
user_prettier_path = os.path.join(project_path, user_prettier_path)
return user_prettier_path
@property
def node_path(self):
return self.get_setting('node_path', None)
@property
def tab_size(self):
return int(self.view.settings().get('tab_size', 2))
@property
def use_tabs(self):
translate_tabs_to_spaces = self.view.settings().get(
'translate_tabs_to_spaces', True)
if not translate_tabs_to_spaces:
return True
return False
@property
def allow_inline_formatting(self):
return self.get_setting('allow_inline_formatting', False)
@property
def additional_cli_args(self):
return self.get_setting('additional_cli_args', {})
@property
def max_file_size_limit(self):
return int(self.get_setting('max_file_size_limit', -1))
def exceeds_max_file_size_limit(self, view):
if self.max_file_size_limit == -1:
return False
if os.path.getsize(view.file_name()) > self.max_file_size_limit:
return True
return False
def is_allowed_file_ext(self, view):
filename = view.file_name()
if not filename:
return False
file_ext = os.path.splitext(filename)[1][1:]
if file_ext in ALLOWED_FILE_EXTENSIONS:
return True
if file_ext in set(self.get_setting('custom_file_extensions', [])):
return True
return False
def run(self, edit, force_entire_file=False):
view = self.view
if view.file_name() is None:
#
            # The file must be saved before it can be formatted:
if not IS_SUBLIME_TEXT_LATEST:
# sublime text 2x: limited dialog support, just show error:
return sublime.error_message(
'{0} Error\n\n'
'File must first be saved.'.format(PLUGIN_NAME))
else:
#
# sublime text 3+: show dialog that includes a save option:
result = sublime.yes_no_cancel_dialog(
'{0}\n\n'
'File must first be Saved.'.format(PLUGIN_NAME),
'Save...', "Don't Save")
if result == sublime.DIALOG_YES:
view.run_command('save')
#
        # re-check whether the file was saved, in case the user canceled or
        # closed the save dialog:
if view.file_name() is None:
return sublime.set_timeout(lambda: sublime.status_message(
'{0}: File save canceled.'.format(PLUGIN_NAME)), 0)
prettier_cli_path = self.prettier_cli_path
if prettier_cli_path is None:
return sublime.error_message(
'{0} Error\n\n'
'The path to the Prettier cli executable could '
'not be found! Please ensure the path to prettier is '
'set in your PATH environment variable.'.format(PLUGIN_NAME))
if self.exceeds_max_file_size_limit(view):
return sublime.set_timeout(lambda: sublime.status_message(
'{0}: File ignored, max allowed file size '
'limit reached.'.format(PLUGIN_NAME)), 0)
prettier_args = self.parse_prettier_options(view)
node_path = self.node_path
# Format entire file:
if not self.has_selection(view) or force_entire_file is True:
region = sublime.Region(0, view.size())
source = view.substr(region)
if self.is_str_empty_or_whitespace_only(source):
return sublime.set_timeout(lambda: sublime.status_message(
'{0}: Nothing to format in file.'.format(PLUGIN_NAME)), 0)
transformed = self._exec_cmd(
source, node_path, prettier_cli_path, prettier_args)
if self.has_error:
self.show_console_error()
return self.show_status_bar_error()
# sanity check to ensure textual content was returned from cmd
# stdout, not necessarily caught in OSError try/catch
# exception handler
if self.is_str_empty_or_whitespace_only(transformed):
self.error_message = 'Empty content returned to stdout'
return self.show_status_bar_error()
file_changed = False
transformed = self.trim_trailing_ws_and_lines(transformed)
if transformed:
if transformed == self.trim_trailing_ws_and_lines(source):
if self.ensure_newline_at_eof(view, edit) is True:
# no formatting changes applied, however, a line
# break was needed/inserted at the end of the file:
file_changed = True
else:
view.replace(edit, region, transformed)
self.ensure_newline_at_eof(view, edit)
file_changed = True
else:
view.replace(edit, region, transformed)
self.ensure_newline_at_eof(view, edit)
file_changed = True
if file_changed is True:
sublime.set_timeout(lambda: sublime.status_message(
'{0}: File formatted.'.format(PLUGIN_NAME)), 0)
else:
sublime.set_timeout(lambda: sublime.status_message(
'{0}: File already formatted.'.format(PLUGIN_NAME)), 0)
return
# Format each selection:
for region in view.sel():
if region.empty():
continue
source = view.substr(region)
if self.is_str_empty_or_whitespace_only(source):
sublime.set_timeout(lambda: sublime.status_message(
'{0}: Nothing to format in selection.'.format(
PLUGIN_NAME)), 0)
continue
transformed = self._exec_cmd(
source, node_path, prettier_cli_path, prettier_args)
if self.has_error:
self.show_console_error()
return self.show_status_bar_error()
# sanity check to ensure textual content was returned from cmd
# stdout, not necessarily caught in OSError try/catch
# exception handler
if self.is_str_empty_or_whitespace_only(transformed):
self.error_message = 'Empty content returned to stdout'
return self.show_status_bar_error()
transformed = self.trim_trailing_ws_and_lines(transformed)
if transformed \
and transformed == self.trim_trailing_ws_and_lines(source):
sublime.set_timeout(lambda: sublime.status_message(
'{0}: Selection(s) already formatted.'.format(
PLUGIN_NAME)), 0)
else:
view.replace(edit, region, transformed)
sublime.set_timeout(lambda: sublime.status_message(
'{0}: Selection(s) formatted.'.format(PLUGIN_NAME)), 0)
def _exec_cmd(self, source, node_path, prettier_cli_path,
prettier_args):
self._error_message = None
if self.is_str_none_or_empty(node_path):
cmd = [prettier_cli_path] \
+ ['--stdin'] \
+ prettier_args
else:
cmd = [node_path] \
+ [prettier_cli_path] \
+ ['--stdin'] \
+ prettier_args
try:
self.show_debug_message(
'Prettier CLI Command', self.list_to_str(cmd))
proc = Popen(
cmd, stdin=PIPE,
stderr=PIPE,
stdout=PIPE,
env=self.proc_env,
shell=self.is_windows())
stdout, stderr = proc.communicate(input=source.encode('utf-8'))
if stderr or proc.returncode != 0:
self.format_error_message(
stderr.decode('utf-8'), str(proc.returncode))
return None
return stdout.decode('utf-8')
except OSError as ex:
sublime.error_message('{0} - {1}'.format(PLUGIN_NAME, ex))
raise
def should_show_plugin(self):
view = self.view
if self.allow_inline_formatting is True:
return True
if self.is_source_js(view) is True:
return True
if self.is_css(view) is True:
return True
if self.is_allowed_file_ext(view) is True:
return True
return False
def is_visible(self):
return self.should_show_plugin()
def is_enabled(self):
return self.should_show_plugin()
def get_setting(self, key, default_value=None):
settings = self.view.settings().get(PLUGIN_NAME)
if settings is None or settings.get(key) is None:
settings = sublime.load_settings(SETTINGS_FILE)
value = settings.get(key, default_value)
# check for project-level overrides:
project_value = self._get_project_setting(key)
if project_value is None:
return value
return project_value
def get_sub_setting(self, key=None):
settings = self.view.settings().get(PLUGIN_NAME)
if settings is None or settings.get(PRETTIER_OPTIONS_KEY).get(
key) is None:
settings = sublime.load_settings(SETTINGS_FILE)
value = settings.get(PRETTIER_OPTIONS_KEY).get(key)
# check for project-level overrides:
project_value = self._get_project_sub_setting(key)
if project_value is None:
return value
return project_value
def parse_prettier_options(self, view):
# TODO: optimize option parsing...
prettier_cli_args = []
is_css = self.is_css(view)
is_typescript = self.is_typescript(view)
is_json = self.is_json(view)
is_graphql = self.is_graphql(view)
for mapping in PRETTIER_OPTION_CLI_MAP:
option_name = mapping['option']
cli_option_name = mapping['cli']
option_value = self.get_sub_setting(option_name)
# internally override the 'parser' option for css
# and set the value to 'postcss':
if option_name == 'parser' and is_css:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('postcss')
continue
# internally override the 'parser' for typescript
# and set the value to 'typescript':
if option_name == 'parser' and is_typescript:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('typescript')
continue
# internally override the 'parser' for json
# and set the value to 'json':
if option_name == 'parser' and is_json:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('json')
continue
# internally override the 'parser' for graphql
# and set the value to 'graphql':
if option_name == 'parser' and is_graphql:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('graphql')
continue
if option_value is None or str(option_value) == '':
option_value = mapping['default']
option_value = str(option_value).strip()
if self.is_bool_str(option_value):
prettier_cli_args.append('{0}={1}'.format(
cli_option_name, option_value.lower()))
else:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append(option_value)
# set the `tabWidth` option based on the current view:
prettier_cli_args.append('--tab-width')
prettier_cli_args.append(str(self.tab_size))
# set the `useTabs` option based on the current view:
prettier_cli_args.append('{0}={1}'.format(
'--use-tabs', str(self.use_tabs).lower()))
# add the additional arguments from the settings file to the command:
if self.additional_cli_args and len(self.additional_cli_args) > 0:
for arg_key, arg_value in self.additional_cli_args.items():
arg_key = str(arg_key).strip()
arg_value = str(arg_value).strip()
# handle bool options
if arg_value != '' and self.is_bool_str(arg_value):
prettier_cli_args.append(
'{0}={1}'.format(arg_key, arg_value.lower()))
continue
prettier_cli_args.append(arg_key)
if arg_value != '':
prettier_cli_args.append(arg_value)
return prettier_cli_args
def which(self, executable, path=None):
if not self.is_str_none_or_empty(executable):
if os.path.isfile(executable):
return executable
if self.is_str_none_or_empty(path):
path = os.environ['PATH']
if not self.is_windows():
usr_path = ':/usr/local/bin'
if not self.env_path_exists(usr_path, path) \
and self.path_exists(usr_path):
path += usr_path
paths = path.split(os.pathsep)
if not os.path.isfile(executable):
for directory in paths:
exec_path = os.path.join(directory, executable)
if os.path.isfile(exec_path):
return exec_path
return None
return executable
def show_debug_message(self, label, message):
if not self.debug:
return
header = ' {0} DEBUG - {1} '.format(PLUGIN_NAME, label)
horizontal_rule = self.repeat_str('-', len(header))
print('\n{0}\n{1}\n{2}\n\n''{3}'.format(
horizontal_rule, header, horizontal_rule, message))
def show_console_error(self):
print('\n------------------\n {0} ERROR \n------------------\n\n'
'{1}'.format(PLUGIN_NAME, self.error_message))
def format_error_message(self, error_message, error_code):
        self.error_message = 'Prettier reported the following error:\n\n' \
                             '{0}\n' \
                             'Process finished with exit code {1}\n' \
            .format(error_message, error_code)
@staticmethod
def is_source_js(view):
return view.scope_name(0).startswith('source.js')
@staticmethod
def is_css(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.css') or filename.endswith('.css'):
return True
if scopename.startswith('source.scss') or filename.endswith('.scss'):
return True
if scopename.startswith('source.less') or filename.endswith('.less'):
return True
return False
@staticmethod
def is_typescript(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.ts') or filename.endswith('.ts'):
return True
if scopename.startswith('source.tsx') or filename.endswith('.tsx'):
return True
return False
@staticmethod
def is_json(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.json') or filename.endswith('.json'):
return True
return False
@staticmethod
def is_graphql(view):
filename = view.file_name()
if not filename:
return False
if filename.endswith('.graphql'):
return True
return False
@staticmethod
def get_active_project_path():
"""Get the active Sublime Text project path.
Original: https://gist.github.com/astronaughts/9678368
:return: The active Sublime Text project path.
"""
window = sublime.active_window()
folders = window.folders()
if len(folders) == 1:
return folders[0]
else:
active_view = window.active_view()
if active_view:
active_file_name = active_view.file_name()
else:
active_file_name = None
if not active_file_name:
return folders[0] if len(folders) else os.path.expanduser('~')
for folder in folders:
if active_file_name.startswith(folder):
return folder
return os.path.dirname(active_file_name)
@staticmethod
def show_status_bar_error():
sublime.set_timeout(lambda: sublime.status_message(
'{0}: Format failed! Open the console window to '
'view error details.'.format(PLUGIN_NAME)), 0)
@staticmethod
def _get_project_setting(key):
"""Get a project setting.
JsPrettier project settings are stored in the sublime project file
as a dictionary, e.g.:
"settings":
{
"js_prettier": { "key": "value", ... }
}
:param key: The project setting key.
:return: The project setting value.
:rtype: str
"""
project_settings = sublime.active_window().active_view().settings()
if not project_settings:
return None
js_prettier_settings = project_settings.get(PROJECT_SETTINGS_KEY)
if js_prettier_settings:
if key in js_prettier_settings:
return js_prettier_settings[key]
return None
@staticmethod
def _get_project_sub_setting(option):
project_settings = sublime.active_window().active_view().settings()
js_prettier_settings = project_settings.get(PROJECT_SETTINGS_KEY, None)
if not js_prettier_settings:
return None
prettier_options = js_prettier_settings.get(PRETTIER_OPTIONS_KEY, None)
if prettier_options:
if option in prettier_options:
return prettier_options.get(option, None)
return None
@staticmethod
def is_bool_str(val):
"""Determine if the specified string :val is 'true' or 'false'.
:param val: The value to check.
:return: True if if val: is a boolean string, otherwise False.
:rtype: bool
"""
if val is None:
return False
if type(val) == str:
val = val.lower().strip()
if val == 'true' or val == 'false':
return True
return False
@staticmethod
def is_str_none_or_empty(val):
"""Determine if the specified str val is None or an empty.
:param val: The str to check.
:return: True if if val: is None or an empty, otherwise False.
:rtype: bool
"""
if val is None:
return True
if type(val) == str:
val = val.strip()
if not val:
return True
return False
@staticmethod
def is_str_empty_or_whitespace_only(txt):
if not txt or len(txt) == 0:
return True
# strip all whitespace/invisible chars to determine textual content:
txt = sub(r'\s+', '', txt)
if not txt or len(txt) == 0:
return True
return False
@staticmethod
def list_to_str(list_to_convert):
"""Convert a list of values into string.
Each value will be seperated by a single space.
:param list_to_convert: The list to convert to a string.
:return: The list converted into a string.
"""
        return ' '.join(str(item) for item in list_to_convert)
@staticmethod
def repeat_str(str_to_repeat, repeat_length):
"""Repeat a string to a certain length.
:param str_to_repeat: The string to repeat. Normally a single char.
:param repeat_length: The amount of times to repeat the string.
:return: The repeated string.
"""
quotient, remainder = divmod(repeat_length, len(str_to_repeat))
return str_to_repeat * quotient + str_to_repeat[:remainder]
@staticmethod
def trim_trailing_ws_and_lines(val):
"""Trim trailing whitespace and line-breaks at the end of a string.
:param val: The value to trim.
:return: The val with trailing whitespace and line-breaks removed.
"""
if val is None:
return val
val = sub(r'\s+\Z', '', val)
return val
@staticmethod
def ensure_newline_at_eof(view, edit):
new_line_inserted = False
if view.size() > 0 and view.substr(view.size() - 1) != '\n':
new_line_inserted = True
view.insert(edit, view.size(), '\n')
return new_line_inserted
@staticmethod
def has_selection(view):
for sel in view.sel():
start, end = sel
if start != end:
return True
return False
@staticmethod
def env_path_exists(find_path, env_path=None):
"""Check if the specified path is listed in OS enviornment path.
:param find_path: The path the search for.
:param env_path: The environment path str.
:return: True if the find_path exists in the env_path.
:rtype: bool
"""
if not find_path:
return False
if not env_path:
env_path = os.environ['PATH']
find_path = str.replace(find_path, os.pathsep, '')
paths = env_path.split(os.pathsep)
for path in paths:
if path == find_path:
return True
return False
@staticmethod
def path_exists(path):
if not path:
return False
if os.path.exists(str.replace(path, os.pathsep, '')):
return True
return False
@staticmethod
def is_mac_os():
return platform.system() == 'Darwin'
@staticmethod
def is_windows():
return platform.system() == 'Windows' or os.name == 'nt'
class CommandOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
if self.is_allowed(view) and self.is_enabled(view):
if self.is_excluded(view):
view.run_command(PLUGIN_CMD_NAME, {'force_entire_file': True})
def auto_format_on_save(self, view):
return self.get_setting(view, 'auto_format_on_save', False)
def auto_format_on_save_excludes(self, view):
return self.get_setting(view, 'auto_format_on_save_excludes', [])
def custom_file_extensions(self, view):
return self.get_setting(view, 'custom_file_extensions', [])
def is_allowed(self, view):
return self.is_allowed_file_ext(view)
def is_enabled(self, view):
return self.auto_format_on_save(view)
    def is_excluded(self, view):
        # NOTE: despite the name, this returns True when the file does NOT
        # match any 'auto_format_on_save_excludes' pattern, i.e. when
        # format-on-save should proceed (see on_pre_save above).
filename = view.file_name()
if not filename:
return False
excludes = self.auto_format_on_save_excludes(view)
regmatch_ef = [fnmatch.translate(pattern) for pattern in excludes]
for regmatch in regmatch_ef:
if match(regmatch, filename):
return False
return True
def is_allowed_file_ext(self, view):
filename = view.file_name()
if not filename:
return False
file_ext = os.path.splitext(filename)[1][1:]
if file_ext in ALLOWED_FILE_EXTENSIONS:
return True
if file_ext in set(self.custom_file_extensions(view)):
return True
return False
def get_setting(self, view, key, default_value=None):
settings = view.settings().get(PLUGIN_NAME)
if settings is None or settings.get(key) is None:
settings = sublime.load_settings(SETTINGS_FILE)
value = settings.get(key, default_value)
# check for project-level overrides:
project_value = self._get_project_setting(key)
if project_value is None:
return value
return project_value
@staticmethod
def _get_project_setting(key):
settings = sublime.active_window().active_view().settings()
if not settings:
return None
jsprettier = settings.get(PROJECT_SETTINGS_KEY)
if jsprettier:
if key in jsprettier:
return jsprettier[key]
return None
| StarcoderdataPython |
1784915 | '''if conditions:
do_action_1
...
do_action_n'''
# The if statement
x = 4
if x % 2 == 0:  # if the remainder of x divided by 2 equals 0
    print("x is divisible by two")  # the action statement is indented
# The if ... elif ... else statement
x = 7
if x % 2 == 0:  # if the remainder of x divided by 2 equals 0
    print("x is divisible by two")
elif x % 3 == 0:  # if the remainder of x divided by 3 equals 0
    print("x is divisible by three")
elif x % 5 == 0:  # if the remainder of x divided by 5 equals 0
    print("x is divisible by five")
else:
    print("x is not divisible by two, three, or five")
# Chained if ... elif ... else with compound conditions
jam = 13
if jam >= 5 and jam < 12:  # while the hour is between 5 and 12
    print("Good morning!")
elif jam >= 12 and jam < 17:  # while the hour is between 12 and 17
    print("Good afternoon!")
elif jam >= 17 and jam < 19:  # while the hour is between 17 and 19
    print("Good evening!")
else:  # any other time
    print("Good night!")
jam = 17
tagihan_ke = 'Mr. Yoyo'
warehousing = {'harga_harian': 1000000, 'total_hari': 15}
cleansing = {'harga_harian': 1500000, 'total_hari': 10}
integration = {'harga_harian': 2000000, 'total_hari': 15}
transform = {'harga_harian': 2500000, 'total_hari': 10}
sub_warehousing = warehousing['harga_harian'] * warehousing['total_hari']
sub_cleansing = cleansing['harga_harian'] * cleansing['total_hari']
sub_integration = integration['harga_harian'] * integration['total_hari']
sub_transform = transform['harga_harian'] * transform['total_hari']
total_harga = sub_warehousing + sub_cleansing + sub_integration + sub_transform
print("Bill to:")
print(tagihan_ke)
if jam > 19:
    print("Good night, your total bill is:")
elif jam > 17:
    print("Good evening, your total bill is:")
elif jam > 12:
    print("Good afternoon, your total bill is:")
else:
    print("Good morning, your total bill is:")
print(total_harga) | StarcoderdataPython |
3268870 | <gh_stars>10-100
# vim: set ts=8 sts=2 sw=2 tw=99 et:
#
# This file is part of AMBuild.
#
# AMBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AMBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AMBuild. If not, see <http://www.gnu.org/licenses/>.
import ambuild2.frontend.vs.gen as vs_gen
from ambuild2.frontend.v2_2.vs import cxx
class Generator(vs_gen.Generator):
def __init__(self, cm):
super(Generator, self).__init__(cm)
self.vs_version_number = cxx.Compiler.GetVersionFromVS(self.vs_version)
self.vs_vendor = cxx.VisualStudio(self.vs_version_number)
# Overridden.
def detectCompilers(self, **kwargs):
return cxx.Compiler(self.vs_vendor, kwargs.pop('target_arch', None))
def newProgramProject(self, context, name):
return cxx.Project(cxx.Program, name)
def newLibraryProject(self, context, name):
return cxx.Project(cxx.Library, name)
def newStaticLibraryProject(self, context, name):
return cxx.Project(cxx.StaticLibrary, name)
| StarcoderdataPython |
1707725 | <filename>virtual_box_tools/windows_password_database.py
import ctypes
def getpwnam(user: str):
    # GetUserNameExW always reports the *current* user; the ``user`` argument
    # exists only to mirror the POSIX pwd.getpwnam() signature.
    get_user_name = ctypes.windll.secur32.GetUserNameExW
    display_name = 3  # NameDisplay in the EXTENDED_NAME_FORMAT enumeration
    size = ctypes.pointer(ctypes.c_ulong(0))
    # The first call fails but writes the required buffer length into size.
    get_user_name(display_name, None, size)
    name_buffer = ctypes.create_unicode_buffer(size.contents.value)
    get_user_name(display_name, name_buffer, size)
    # Index 4 mirrors pw_gecos (the display name) in pwd's struct layout.
    return ['', '', '', '', name_buffer.value]
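if __name__ == '__main__':
    # Illustrative usage (an assumption, not part of the original module):
    # the argument is ignored on Windows, so any name yields the current user.
    print(getpwnam('anyone')[4])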
| StarcoderdataPython |