blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab6333b26ca5c5e92c98730f02f2f883ba820907 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_frizzing.py | 1ed7c522d5ae15fc66c9c2b646ba67fb89ea4cfa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.verbs._frizz import _FRIZZ
# class header
class _FRIZZING(_FRIZZ, ):
    """Word entry for the verb form "FRIZZING", derived from the base verb "frizz"."""

    def __init__(self,):
        """Initialise the parent word entry, then record this form's metadata."""
        _FRIZZ.__init__(self)
        # Attribute layout mirrors the sibling generated word-entry classes.
        self.jsondata = {}
        self.basic = "frizz"
        self.specie = 'verbs'
        self.name = "FRIZZING"
| [
"[email protected]"
] | |
b6f490deab8b0d16a1adff8b3c97ecf942ab4482 | 9908dc07233b4025425dc212b5e4acb3b087971e | /Medium/findRedundantConnection.py | c3fd9af33cb44dbda9d4c81e96ae23b61cd0a8ad | [] | no_license | Abdelhamid-bouzid/problem-Sovling- | 15769da71d19186947607574860462ad81f34e40 | fa0eecab8a94d1ad20b5aa129973f59eddd5678d | refs/heads/main | 2023-08-27T21:49:32.337979 | 2021-10-23T21:57:55 | 2021-10-23T21:57:55 | 317,097,388 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | class Solution:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
self.g = collections.defaultdict(list)
for u,v in edges:
self.g[u].append(v)
self.g[v].append(u)
for u,v in edges[::-1]:
self.vis=set()
self.dfs(1,u,v)
if len(self.vis)==len(self.g):
return [u,v]
def dfs(self,node,u,v):
if node in self.vis:
return True
self.vis.add(node)
for adj in self.g[node]:
if [node,adj]!=[u,v] and [adj,node]!=[u,v]:
self.dfs(adj,u,v)
| [
"[email protected]"
] | |
a9fa1f05a49145676d8d384b3c7e7cc8f4b16897 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_09_26_polycrystal_FIP_allpoint/plot_evd.py | d523d88b853904fc3267a94e0c6fc19be735c236 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from constants import const
import h5py
import sys
def pltevd(H):
    """Plot the empirical extreme-value distributions (EVDs) for every
    sample set listed in const()['sid'] on one ln(FIP)-vs-CDF figure and
    save it as 'evd_orig_L<H>.png'.

    H: label used only to build the regression/figure file names.
    """
    C = const()
    """define the colors of interest"""
    # One distinct rainbow colour per sample id.
    n_col = len(C['sid'])
    colormat = cm.rainbow(np.linspace(0, 1, n_col))
    # NOTE(review): f_reg is opened but never read below — presumably kept
    # so a missing regression file fails fast; confirm before removing.
    f_reg = h5py.File("regression_results_L%s.hdf5" % H, 'r')
    fig = plt.figure(figsize=[5.5, 4])
    f = h5py.File("responses.hdf5", 'r')
    for ii in xrange(n_col):  # xrange: this module targets Python 2
        sid = C['sid'][ii]
        """get the x, y data for plotting the evd"""
        x = f.get('evd_%s' % sid)[...]
        # Track the global min/max of ln(x) across all series for the x-limits.
        if ii == 0:
            xmin = np.log(x).min()
            xmax = np.log(x).max()
        else:
            xmin = np.min([xmin, np.log(x).min()])
            xmax = np.max([xmax, np.log(x).max()])
        # Empirical CDF: the k-th smallest value gets probability k/N.
        y = (np.arange(x.size)+1)/np.float32(x.size)
        """plot the original data and the fits"""
        # plt.plot(np.log(x), y, '.', markersize=2, color=colormat[ii, :],
        #          label=sid)
        plt.plot(np.log(x), y, '-', color=colormat[ii, :],
                 label=sid)
    f.close()
    f_reg.close()
    plt.xlabel("ln(FIP)")
    plt.ylabel("CDF")
    plt.legend(loc='lower right', shadow=True, fontsize='small')
    # Pad the x-limits by 1% of the data range on each side.
    rng = np.abs(xmax - xmin)
    xmin += -0.01*rng
    xmax += 0.01*rng
    plt.xlim((xmin, xmax))
    # y-limits: start at 0, pad the top by 1% of the last series' range.
    ymin = y.min()
    ymax = y.max()
    rng = ymax - ymin
    ymin = 0
    ymax += 0.01*rng
    plt.ylim((ymin, ymax))
    plt.tight_layout()
    fig_name = 'evd_orig_L%s.png' % H
    fig.canvas.set_window_title(fig_name)
    plt.savefig(fig_name)
if __name__ == '__main__':
    # Usage: python plot_evd.py <H>; show the figure after it is saved.
    pltevd(sys.argv[1])
    plt.show()
| [
"[email protected]"
] | |
168a1a3ba4c092e59778ca8c0e121f8af2cbdb0f | b6af5ed67f758dace85c9cec2091c36d769e5668 | /build/handsnet_time/catkin_generated/installspace/tactile_image_publisher_5.py | 34897b9675238a073264ba477e450f23797c3517 | [
"MIT"
] | permissive | robertokcanale/ros_workspace_handsnet | 897920d6ef30554556449085816d2e8ffb096721 | 09672bf2b4c54d0064f339005dc5eb3ac4f9d80d | refs/heads/main | 2023-04-10T06:50:12.652997 | 2021-04-12T12:08:41 | 2021-04-12T12:08:41 | 353,714,029 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | #!/usr/bin/env python3
import rospy
#import tensorflow as tf
from PIL import Image
from sensor_msgs.msg import Image as TactileImage
from handsnet_time.msg import Image_array
import numpy as np
#I can make a message of this type
#sensor_msgs/Image[] data
if __name__ == '__main__':
    # Publish a 4-slot Image_array message on the 'tactile_image_array' topic.
    pub = rospy.Publisher('tactile_image_array', Image_array, queue_size=10)
    rospy.init_node('tactile_image_publisher5')
    rate = rospy.Rate(1000) # loop rate in Hz (1000 Hz — the old "1hz" note was wrong)
    contacts = Image_array()
    #print(contacts.tactile_image[1])
    while not rospy.is_shutdown():
        # Re-read the four sample PNGs and repack them on every cycle.
        for i in range(0, 4):
            im_name='src/handsnet_time/data/'+str(i+1)+'.png'
            #PIL image: load, force RGB, and resize to the 68x100 sensor grid
            im = Image.open(im_name)
            im = im.convert('RGB')
            im = im.resize((68,100), Image.ANTIALIAS)
            # Fill one sensor_msgs/Image slot with the raw RGB byte buffer.
            contacts.tactile_image[i] = TactileImage()
            contacts.tactile_image[i].header.stamp = rospy.Time.now()
            contacts.tactile_image[i].height = im.height
            contacts.tactile_image[i].width = im.width
            contacts.tactile_image[i].encoding = "rgb8"
            contacts.tactile_image[i].is_bigendian = False
            contacts.tactile_image[i].step = 3 * im.width # Full row length in bytes
            contacts.tactile_image[i].data = np.array(im).tobytes()
        pub.publish(contacts)
        rate.sleep()
#also, I need something of the kind PIL.Image.Image
#tested it, and it wants a PIL image, don't forget to place the GPU stuff
#model = tf.keras.models.load_model('src/handsnet/data/HandsNet_2_97.h5')
#input_arr= tf.keras.preprocessing.image.img_to_array(im)
#input_arr = np.array([input_arr])
#predictions = model.predict(input_arr)
#print(predictions)
| [
"[email protected]"
] | |
e6ff765e39660197728176631c129a6e521196c7 | ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3 | /old/past1_E.py | e538f70ef5be64f085af47fec1d7b8236ac36a41 | [] | no_license | kussy-tessy/atcoder | 5604919747242ee9740b9131bb6e168e96af0151 | ee917fa5a5218d4a9e72f710d0d844e7c203f13b | refs/heads/master | 2023-07-21T09:25:15.464881 | 2021-09-04T14:06:02 | 2021-09-04T14:06:02 | 311,221,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # print('input >>')
# Read the number of users N and the number of queries Q.
N, Q = map(int,(input().split()))
# Adjacency matrix: follows[a][b] == 1 means user a follows user b.
follows = [[0] * N for _ in range(N)]
logs = []
for _ in range(Q):
    logs.append(input())
for log in logs:
    log_info = log.split()
    # Queries are 1-indexed; convert the acting user to a 0-based index.
    person = int(log_info[1])-1
    if log_info[0] == '1':
        # Type 1: person follows the user given as the third token.
        follows[person][int(log_info[2])-1] = 1
    elif log_info[0] == '2':
        # Type 2: person follows back everyone who currently follows them.
        for i in range(N):
            if follows[i][person] == 1:
                follows[person][i] = 1
    elif log_info[0] == '3':
        # Type 3: person follows everyone followed by the users they follow.
        # Snapshot the current followee list first so updates made during
        # this query do not cascade within the same query.
        xs = []
        for i in range(N):
            if follows[person][i] == 1:
                xs.append(i)
        for x in xs:
            for j in range(N):
                if follows[x][j] == 1:
                    follows[person][j] = 1
# Print the N x N relation: 'Y' if i follows j (diagonal forced to 'N').
for i, fs in enumerate(follows):
    for j, f in enumerate(fs):
        if f == 1 and i != j:
            print('Y', end='')
        else:
            print('N', end='')
print() | [
"[email protected]"
] | |
080b3808ad65aeadf62c0f3a420f7f9a286b309d | 7a4da5ec2196bf975a9e6115846244788b36b952 | /3.7.0/lldb-3.7.0.src/test/python_api/frame/inlines/TestInlinedFrame.py | 217b87e892696f5a9f974f714461cd306ed5a293 | [
"NCSA",
"MIT"
] | permissive | androm3da/clang_sles | ca4ada2ec85d625c65818ca9b60dcf1bc27f0756 | 2ba6d0711546ad681883c42dfb8661b842806695 | refs/heads/master | 2021-01-10T13:50:25.353394 | 2016-03-31T21:38:29 | 2016-03-31T21:38:29 | 44,787,977 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | """
Testlldb Python SBFrame APIs IsInlined() and GetFunctionName().
"""
import os, time
import re
import unittest2
import lldb, lldbutil
from lldbtest import *
class InlinedFrameAPITestCase(TestBase):
    """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName() by
    stopping inside a function that the compiler inlined into several
    call sites. (Python 2 test; uses the lldb test-suite decorators.)"""

    mydir = TestBase.compute_mydir(__file__)
    @skipUnlessDarwin
    @python_api_test
    @dsym_test
    def test_stop_at_outer_inline_with_dsym(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        self.buildDsym()
        self.do_stop_at_outer_inline()
    @python_api_test
    @dwarf_test
    def test_stop_at_outer_inline_with_dwarf(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        self.buildDwarf()
        self.do_stop_at_outer_inline()
    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Record the source file and the line numbers of the two expected
        # breakpoint stops, located by marker comments inside inlines.c.
        self.source = 'inlines.c'
        self.first_stop = line_number(self.source, '// This should correspond to the first break stop.')
        self.second_stop = line_number(self.source, '// This should correspond to the second break stop.')
    def do_stop_at_outer_inline(self):
        """Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
        exe = os.path.join(os.getcwd(), "a.out")
        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Now create a breakpoint on main.c by the name of 'inner_inline'.
        # More than one location is expected because the function is
        # inlined into multiple call sites.
        breakpoint = target.BreakpointCreateByName('inner_inline', 'a.out')
        #print "breakpoint:", breakpoint
        self.assertTrue(breakpoint and
                        breakpoint.GetNumLocations() > 1,
                        VALID_BREAKPOINT)
        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple (None, None, self.get_process_working_directory())
        process = target.GetProcess()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        import lldbutil
        stack_traces1 = lldbutil.print_stacktraces(process, string_buffer=True)
        if self.TraceOn():
            print "Full stack traces when first stopped on the breakpoint 'inner_inline':"
            print stack_traces1
        # The first breakpoint should correspond to an inlined call frame.
        # If it's an inlined call frame, expect to find, in the stack trace,
        # that there is a frame which corresponds to the following call site:
        #
        #     outer_inline (argc);
        #
        frame0 = process.GetThreadAtIndex(0).GetFrameAtIndex(0)
        if frame0.IsInlined():
            filename = frame0.GetLineEntry().GetFileSpec().GetFilename()
            self.assertTrue(filename == self.source)
            self.expect(stack_traces1, "First stop at %s:%d" % (self.source, self.first_stop), exe=False,
                substrs = ['%s:%d' % (self.source, self.first_stop)])
        # Expect to break again for the second time.
        process.Continue()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        stack_traces2 = lldbutil.print_stacktraces(process, string_buffer=True)
        if self.TraceOn():
            print "Full stack traces when stopped on the breakpoint 'inner_inline' for the second time:"
            print stack_traces2
        self.expect(stack_traces2, "Second stop at %s:%d" % (self.source, self.second_stop), exe=False,
            substrs = ['%s:%d' % (self.source, self.second_stop)])
if __name__ == '__main__':
    import atexit
    # Initialise LLDB for a standalone run and guarantee Terminate() is
    # called however the unittest driver exits.
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
| [
"[email protected]"
] | |
b83962f8d926061997321482969175d3eafd8d1a | 4e5d3c743db5318c9a5e30823adedd1066b52350 | /payntera/version.py | abadaefae0c99b255acfb15361bd5d30f4c32405 | [] | no_license | saalfeldlab/payntera | 2c68d24da46f45d3261cb2f9451bdfcb6c7866e6 | d7111f318c1b1db8c8f9d4bb3e2fb8919cc9048e | refs/heads/master | 2020-03-23T00:55:42.380542 | 2019-10-02T20:07:12 | 2019-10-02T20:07:15 | 140,892,566 | 1 | 1 | null | 2018-11-13T13:54:05 | 2018-07-13T21:00:17 | Python | UTF-8 | Python | false | false | 23 | py | __version__ = '0.20.1'
| [
"[email protected]"
] | |
6fc9e4a1b64d40596d776205e98565fd67ee4d27 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/nOU.py | 6c27926aad7723e383583892ecc75bdfa6a62613 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the space-joined tokens between a pair of '"' delimiters.

    lineRemaining: list of whitespace-split tokens; the first and last
    element must each be a bare '"' for anything to be printed.
    (Python 2 print-statement syntax.)
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Empty quotes: emit a blank line.
            print
def main(fileName):
    """Interpret each line of *fileName*: lines whose first token is 'nOU'
    are handed to printFunction; any other line prints ERROR and stops
    processing. (Python 2 syntax.)
    """
    with open(fileName) as f:
        for line in f:
            # NOTE(review): a blank line makes data[0] raise IndexError —
            # confirm inputs never contain empty lines.
            data = line.split()
            if data[0] == 'nOU':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e489b80b813521f9e69b3f3c43c39d02cdba43cf | c526d2f3e457b1b25d5f2cb5bda914236e6c265b | /candidates/urls.py | 5c8fb469cc8b566cb76f30f605fdaab774b81d78 | [
"CC0-1.0"
] | permissive | yhsiang/twly-voter-guide | 99e2269da57a21b5779ec3defd9c7e23c7668f64 | ae87c9f9b9f053f79a12f04afe0d60f227dc68c1 | refs/heads/master | 2020-12-31T02:42:01.568168 | 2015-03-21T13:24:55 | 2015-03-21T13:24:55 | 33,117,399 | 0 | 0 | null | 2015-03-30T10:42:56 | 2015-03-30T10:42:56 | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from candidates import views
# URL routes for the candidates app. Uses the legacy Django `patterns()`
# helper (removed in Django 1.10); "ad" appears to be the legislature
# term number — confirm against the views. Order matters: more specific
# regexes are listed before their prefixes.
urlpatterns = patterns('',
    # Landing page: county list, defaulting to term 8.
    url(r'^$', views.counties, {"ad": 8}),
    # One electoral district within a county for a given term.
    url(r'^(?P<ad>\d+)/(?P<county>\S+)/(?P<constituency>\d+)/$', views.district, name='district'),
    # All districts of a county for a given term.
    url(r'^(?P<ad>\d+)/(?P<county>\S+)/$', views.districts, name='districts'),
    # County list for a given term.
    url(r'^(?P<ad>\d+)/$', views.counties, name='counties'),
    # Political contribution records for one candidate (uid) and term.
    url(r'^political_contributions/(?P<uid>\S+)/(?P<ad>\d+)/$', views.political_contributions, name='political_contributions'),
)
| [
"[email protected]"
] | |
58e9f9212ed8b1ac0ced1beb80a3a50936a3d03e | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /stypy/sgmc/sgmc_cache/site_packages/numpy/fft/info.py | 98e7d88d37cdf14381335db9fead5f63b6dd4bbf | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,813 | py |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: '''
2: Discrete Fourier Transform (:mod:`numpy.fft`)
3: =============================================
4:
5: .. currentmodule:: numpy.fft
6:
7: Standard FFTs
8: -------------
9:
10: .. autosummary::
11: :toctree: generated/
12:
13: fft Discrete Fourier transform.
14: ifft Inverse discrete Fourier transform.
15: fft2 Discrete Fourier transform in two dimensions.
16: ifft2 Inverse discrete Fourier transform in two dimensions.
17: fftn Discrete Fourier transform in N-dimensions.
18: ifftn Inverse discrete Fourier transform in N dimensions.
19:
20: Real FFTs
21: ---------
22:
23: .. autosummary::
24: :toctree: generated/
25:
26: rfft Real discrete Fourier transform.
27: irfft Inverse real discrete Fourier transform.
28: rfft2 Real discrete Fourier transform in two dimensions.
29: irfft2 Inverse real discrete Fourier transform in two dimensions.
30: rfftn Real discrete Fourier transform in N dimensions.
31: irfftn Inverse real discrete Fourier transform in N dimensions.
32:
33: Hermitian FFTs
34: --------------
35:
36: .. autosummary::
37: :toctree: generated/
38:
39: hfft Hermitian discrete Fourier transform.
40: ihfft Inverse Hermitian discrete Fourier transform.
41:
42: Helper routines
43: ---------------
44:
45: .. autosummary::
46: :toctree: generated/
47:
48: fftfreq Discrete Fourier Transform sample frequencies.
49: rfftfreq DFT sample frequencies (for usage with rfft, irfft).
50: fftshift Shift zero-frequency component to center of spectrum.
51: ifftshift Inverse of fftshift.
52:
53:
54: Background information
55: ----------------------
56:
57: Fourier analysis is fundamentally a method for expressing a function as a
58: sum of periodic components, and for recovering the function from those
59: components. When both the function and its Fourier transform are
60: replaced with discretized counterparts, it is called the discrete Fourier
61: transform (DFT). The DFT has become a mainstay of numerical computing in
62: part because of a very fast algorithm for computing it, called the Fast
63: Fourier Transform (FFT), which was known to Gauss (1805) and was brought
64: to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
65: provide an accessible introduction to Fourier analysis and its
66: applications.
67:
68: Because the discrete Fourier transform separates its input into
69: components that contribute at discrete frequencies, it has a great number
70: of applications in digital signal processing, e.g., for filtering, and in
71: this context the discretized input to the transform is customarily
72: referred to as a *signal*, which exists in the *time domain*. The output
73: is called a *spectrum* or *transform* and exists in the *frequency
74: domain*.
75:
76: Implementation details
77: ----------------------
78:
79: There are many ways to define the DFT, varying in the sign of the
80: exponent, normalization, etc. In this implementation, the DFT is defined
81: as
82:
83: .. math::
84: A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
85: \\qquad k = 0,\\ldots,n-1.
86:
87: The DFT is in general defined for complex inputs and outputs, and a
88: single-frequency component at linear frequency :math:`f` is
89: represented by a complex exponential
90: :math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
91: is the sampling interval.
92:
93: The values in the result follow so-called "standard" order: If ``A =
94: fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
95: the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
96: contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
97: negative-frequency terms, in order of decreasingly negative frequency.
98: For an even number of input points, ``A[n/2]`` represents both positive and
99: negative Nyquist frequency, and is also purely real for real input. For
100: an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
101: frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
102: The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
103: of corresponding elements in the output. The routine
104: ``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
105: zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
106: that shift.
107:
108: When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
109: is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
110: The phase spectrum is obtained by ``np.angle(A)``.
111:
112: The inverse DFT is defined as
113:
114: .. math::
115: a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
116: \\qquad m = 0,\\ldots,n-1.
117:
118: It differs from the forward transform by the sign of the exponential
119: argument and the default normalization by :math:`1/n`.
120:
121: Normalization
122: -------------
123: The default normalization has the direct transforms unscaled and the inverse
124: transforms are scaled by :math:`1/n`. It is possible to obtain unitary
125: transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
126: `None`) so that both direct and inverse transforms will be scaled by
127: :math:`1/\\sqrt{n}`.
128:
129: Real and Hermitian transforms
130: -----------------------------
131:
132: When the input is purely real, its transform is Hermitian, i.e., the
133: component at frequency :math:`f_k` is the complex conjugate of the
134: component at frequency :math:`-f_k`, which means that for real
135: inputs there is no information in the negative frequency components that
136: is not already available from the positive frequency components.
137: The family of `rfft` functions is
138: designed to operate on real inputs, and exploits this symmetry by
139: computing only the positive frequency components, up to and including the
140: Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
141: output points. The inverses of this family assumes the same symmetry of
142: its input, and for an output of ``n`` points uses ``n/2+1`` input points.
143:
144: Correspondingly, when the spectrum is purely real, the signal is
145: Hermitian. The `hfft` family of functions exploits this symmetry by
146: using ``n/2+1`` complex points in the input (time) domain for ``n`` real
147: points in the frequency domain.
148:
149: In higher dimensions, FFTs are used, e.g., for image analysis and
150: filtering. The computational efficiency of the FFT means that it can
151: also be a faster way to compute large convolutions, using the property
152: that a convolution in the time domain is equivalent to a point-by-point
153: multiplication in the frequency domain.
154:
155: Higher dimensions
156: -----------------
157:
158: In two dimensions, the DFT is defined as
159:
160: .. math::
161: A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
162: a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
163: \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
164:
165: which extends in the obvious way to higher dimensions, and the inverses
166: in higher dimensions also extend in the same way.
167:
168: References
169: ----------
170:
171: .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
172: machine calculation of complex Fourier series," *Math. Comput.*
173: 19: 297-301.
174:
175: .. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
176: 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
177: 12-13. Cambridge Univ. Press, Cambridge, UK.
178:
179: Examples
180: --------
181:
182: For examples, see the various functions.
183:
184: '''
185: from __future__ import division, absolute_import, print_function
186:
187: depends = ['core']
188:
"""
# Import the stypy library necessary elements
# NOTE(review): this file looks machine-generated by the stypy
# type-inference tool; hand edits will likely be lost on regeneration.
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
# `module_type_store` records the inferred type of each module-level name.
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
str_101081 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 184, (-1)), 'str', '\nDiscrete Fourier Transform (:mod:`numpy.fft`)\n=============================================\n\n.. currentmodule:: numpy.fft\n\nStandard FFTs\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n fft Discrete Fourier transform.\n ifft Inverse discrete Fourier transform.\n fft2 Discrete Fourier transform in two dimensions.\n ifft2 Inverse discrete Fourier transform in two dimensions.\n fftn Discrete Fourier transform in N-dimensions.\n ifftn Inverse discrete Fourier transform in N dimensions.\n\nReal FFTs\n---------\n\n.. autosummary::\n :toctree: generated/\n\n rfft Real discrete Fourier transform.\n irfft Inverse real discrete Fourier transform.\n rfft2 Real discrete Fourier transform in two dimensions.\n irfft2 Inverse real discrete Fourier transform in two dimensions.\n rfftn Real discrete Fourier transform in N dimensions.\n irfftn Inverse real discrete Fourier transform in N dimensions.\n\nHermitian FFTs\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n hfft Hermitian discrete Fourier transform.\n ihfft Inverse Hermitian discrete Fourier transform.\n\nHelper routines\n---------------\n\n.. autosummary::\n :toctree: generated/\n\n fftfreq Discrete Fourier Transform sample frequencies.\n rfftfreq DFT sample frequencies (for usage with rfft, irfft).\n fftshift Shift zero-frequency component to center of spectrum.\n ifftshift Inverse of fftshift.\n\n\nBackground information\n----------------------\n\nFourier analysis is fundamentally a method for expressing a function as a\nsum of periodic components, and for recovering the function from those\ncomponents. When both the function and its Fourier transform are\nreplaced with discretized counterparts, it is called the discrete Fourier\ntransform (DFT). 
The DFT has become a mainstay of numerical computing in\npart because of a very fast algorithm for computing it, called the Fast\nFourier Transform (FFT), which was known to Gauss (1805) and was brought\nto light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_\nprovide an accessible introduction to Fourier analysis and its\napplications.\n\nBecause the discrete Fourier transform separates its input into\ncomponents that contribute at discrete frequencies, it has a great number\nof applications in digital signal processing, e.g., for filtering, and in\nthis context the discretized input to the transform is customarily\nreferred to as a *signal*, which exists in the *time domain*. The output\nis called a *spectrum* or *transform* and exists in the *frequency\ndomain*.\n\nImplementation details\n----------------------\n\nThere are many ways to define the DFT, varying in the sign of the\nexponent, normalization, etc. In this implementation, the DFT is defined\nas\n\n.. math::\n A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n \\qquad k = 0,\\ldots,n-1.\n\nThe DFT is in general defined for complex inputs and outputs, and a\nsingle-frequency component at linear frequency :math:`f` is\nrepresented by a complex exponential\n:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`\nis the sampling interval.\n\nThe values in the result follow so-called "standard" order: If ``A =\nfft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of\nthe signal), which is always purely real for real inputs. Then ``A[1:n/2]``\ncontains the positive-frequency terms, and ``A[n/2+1:]`` contains the\nnegative-frequency terms, in order of decreasingly negative frequency.\nFor an even number of input points, ``A[n/2]`` represents both positive and\nnegative Nyquist frequency, and is also purely real for real input. 
For\nan odd number of input points, ``A[(n-1)/2]`` contains the largest positive\nfrequency, while ``A[(n+1)/2]`` contains the largest negative frequency.\nThe routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies\nof corresponding elements in the output. The routine\n``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the\nzero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes\nthat shift.\n\nWhen the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``\nis its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.\nThe phase spectrum is obtained by ``np.angle(A)``.\n\nThe inverse DFT is defined as\n\n.. math::\n a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}\n \\qquad m = 0,\\ldots,n-1.\n\nIt differs from the forward transform by the sign of the exponential\nargument and the default normalization by :math:`1/n`.\n\nNormalization\n-------------\nThe default normalization has the direct transforms unscaled and the inverse\ntransforms are scaled by :math:`1/n`. It is possible to obtain unitary\ntransforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is\n`None`) so that both direct and inverse transforms will be scaled by\n:math:`1/\\sqrt{n}`.\n\nReal and Hermitian transforms\n-----------------------------\n\nWhen the input is purely real, its transform is Hermitian, i.e., the\ncomponent at frequency :math:`f_k` is the complex conjugate of the\ncomponent at frequency :math:`-f_k`, which means that for real\ninputs there is no information in the negative frequency components that\nis not already available from the positive frequency components.\nThe family of `rfft` functions is\ndesigned to operate on real inputs, and exploits this symmetry by\ncomputing only the positive frequency components, up to and including the\nNyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex\noutput points. 
The inverses of this family assumes the same symmetry of\nits input, and for an output of ``n`` points uses ``n/2+1`` input points.\n\nCorrespondingly, when the spectrum is purely real, the signal is\nHermitian. The `hfft` family of functions exploits this symmetry by\nusing ``n/2+1`` complex points in the input (time) domain for ``n`` real\npoints in the frequency domain.\n\nIn higher dimensions, FFTs are used, e.g., for image analysis and\nfiltering. The computational efficiency of the FFT means that it can\nalso be a faster way to compute large convolutions, using the property\nthat a convolution in the time domain is equivalent to a point-by-point\nmultiplication in the frequency domain.\n\nHigher dimensions\n-----------------\n\nIn two dimensions, the DFT is defined as\n\n.. math::\n A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}\n a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}\n \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,\n\nwhich extends in the obvious way to higher dimensions, and the inverses\nin higher dimensions also extend in the same way.\n\nReferences\n----------\n\n.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the\n machine calculation of complex Fourier series," *Math. Comput.*\n 19: 297-301.\n\n.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,\n 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.\n 12-13. Cambridge Univ. Press, Cambridge, UK.\n\nExamples\n--------\n\nFor examples, see the various functions.\n\n')
# Assigning a List to a Name (line 187):
# The statements below model `depends = ['core']` from line 187 of the
# original program (reproduced in the module docstring above).
# Obtaining an instance of the builtin type 'list' (line 187)
list_101082 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 187, 10), 'list')
# Adding type elements to the builtin type 'list' instance (line 187)
# Adding element type (line 187)
str_101083 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 187, 11), 'str', 'core')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 187, 10), list_101082, str_101083)
# Assigning a type to the variable 'depends' (line 187)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 187, 0), 'depends', list_101082)
# ################# End of the type inference program ##################
# Collect the type errors/warnings produced while running the program above.
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
| [
"[email protected]"
] | |
c76e0a8da89cff2174d9900d2b2d795ccf522914 | be50b4dd0b5b8c3813b8c3158332b1154fe8fe62 | /Math/Python/SortedPermutationRank.py | 28c0906d6624cf1570338c7b46d6235b336d4950 | [] | no_license | Zimmermann25/InterviewBit | a8d89e090068d9644e28085625963c8ce75d3dff | 6d2138e740bd5ba8eab992d9bf090977e077bfc5 | refs/heads/main | 2023-03-24T18:12:48.244950 | 2021-03-24T14:36:48 | 2021-03-24T14:36:48 | 350,835,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | class Solution:
import math
# @param A : string
# @return an integer
def findRank(self, A):
if len(A) < 1:return 0
if len(A) ==1:return 1
counter = 0
# False oznacza,ze ta litera nie zostala jeszcze wykorzystana
charArr = [[A[i], False] for i in range(len(A))]
charArr.sort()
#print("charArr: ", charArr)
for i in range(len(A)):
curChar = A[i]
j = 0 # do pętli while i szukania mniejszych
smallCounter = 0
while j < len(charArr):
# tutaj uwzględnić etykietę TrueFalse
if charArr[j][0] >= curChar :
charArr[j][1] = True #oznacz tą literę jako użytą
break
if charArr[j][1]==False:
smallCounter +=1
j+=1
#print("fact: ", math.factorial(len(A)-j))
#print("smallCounter: ", smallCounter)
counter += (smallCounter * math.factorial(len(A)-i-1) )
#print("counter: ", counter, " j: ", j, "i: ", i, "f: ", math.factorial(len(A)-i-1))
return (counter+1) % 1000003 | [
"[email protected]"
] | |
45c93dfe5019d6bc09fc9cd7499e5990d2691491 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02821/s486815367.py | c6e104ff5932a2b1ed2561cc6e8d0125a064d8c4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | import sys
from bisect import bisect_left,bisect_right
sys.setrecursionlimit(10**9)
INF = 10**18


def input():
    # Fast input: shadow the built-in with a raw stdin readline.
    return sys.stdin.readline().rstrip()


def main():
    """Sum the M largest of the N*N ordered-pair sums A[i] + A[j]."""
    n, m = map(int, input().split())
    a = sorted(map(int, input().split()))

    # prefix[k] = sum of the k smallest values of a.
    prefix = [0] * (n + 1)
    for idx, val in enumerate(a):
        prefix[idx + 1] = prefix[idx] + val

    def enough(threshold):
        """True when at least m ordered pairs have sum >= threshold."""
        pairs = 0
        for val in a:
            pairs += n - bisect_left(a, threshold - val)
        return pairs >= m

    # Binary search for the largest threshold still reached by >= m pairs.
    lo, hi = 0, 10**11
    while abs(lo - hi) > 1:
        mid = (lo + hi) // 2
        if enough(mid):
            lo = mid
        else:
            hi = mid

    best = lo
    total = 0
    taken = 0
    for val in a:
        start = bisect_left(a, best - val)
        taken += n - start
        total += prefix[n] - prefix[start] + val * (n - start)
    if taken == m:
        print(total)
    else:
        # Drop the surplus pairs; they all have sum exactly `best`.
        print(total + (m - taken) * best)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
0d07714134ac6449e78e4d248375b431f66f16e0 | 16047f965a69893a8cd2c8d18fbd7b9c86a07eb3 | /src/kubernetes/client/models/v1_quobyte_volume_source.py | 52fef80de7b4ac6de368f6c4785f8c2a3414d71f | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | guctum/aws-kube-codesuite | 9ce2cc02fe5fa15c2e175fb697138014fb162f1e | 5d62beaadc13bec745ac7d2fc18f07805e91cef3 | refs/heads/master | 2021-05-24T10:08:00.651840 | 2020-04-23T20:21:46 | 2020-04-23T20:21:46 | 253,511,083 | 0 | 0 | Apache-2.0 | 2020-04-06T13:48:14 | 2020-04-06T13:48:13 | null | UTF-8 | Python | false | false | 6,587 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1QuobyteVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Represents a Quobyte volume mount (Kubernetes core v1 API).
    """

    def __init__(self, group=None, read_only=None, registry=None, user=None, volume=None):
        """
        V1QuobyteVolumeSource - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> python type, and attribute name -> JSON key.
        self.swagger_types = {
            'group': 'str',
            'read_only': 'bool',
            'registry': 'str',
            'user': 'str',
            'volume': 'str'
        }

        self.attribute_map = {
            'group': 'group',
            'read_only': 'readOnly',
            'registry': 'registry',
            'user': 'user',
            'volume': 'volume'
        }

        # Required fields (registry, volume) are validated in their setters
        # but deliberately not here, to stay compatible with callers that
        # construct the object first and fill it in afterwards.
        self._group = group
        self._read_only = read_only
        self._registry = registry
        self._user = user
        self._volume = volume

    @property
    def group(self):
        """Group to map volume access to. Default is no group."""
        return self._group

    @group.setter
    def group(self, group):
        """Sets the group used to map volume access."""
        self._group = group

    @property
    def read_only(self):
        """Whether the Quobyte volume is mounted read-only. Defaults to False."""
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read-only flag for the mount."""
        self._read_only = read_only

    @property
    def registry(self):
        """Quobyte registry service(s) as 'host:port' pairs (comma separated). Required."""
        return self._registry

    @registry.setter
    def registry(self, registry):
        """Sets the registry; raises ValueError on None (required field)."""
        if registry is None:
            raise ValueError("Invalid value for `registry`, must not be `None`")

        self._registry = registry

    @property
    def user(self):
        """User to map volume access to. Defaults to the service account user."""
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user used to map volume access."""
        self._user = user

    @property
    def volume(self):
        """Name of an already created Quobyte volume. Required."""
        return self._volume

    @volume.setter
    def volume(self, volume):
        """Sets the volume name; raises ValueError on None (required field)."""
        if volume is None:
            raise ValueError("Invalid value for `volume`, must not be `None`")

        self._volume = volume

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models and containers of models.
        """
        result = {}

        # Python 3 idiom: dict.items() instead of six.iteritems().
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1QuobyteVolumeSource):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"[email protected]"
] | |
a7f5239914d25e60fde6bf4ad74825ca1a302698 | 360ae1188ad79e71ccc72da0b9ae709bda678f91 | /ryu/lib/xflow/netflow.py | f41a9f57341ddbaf6c3a1e32928888653be34be0 | [
"Apache-2.0"
] | permissive | faucetsdn/ryu | 47b3523e7ccb381f3bdf2877a3f9f01cb1876054 | d6cda4f427ff8de82b94c58aa826824a106014c2 | refs/heads/master | 2023-09-05T06:37:21.991029 | 2022-06-09T23:09:40 | 2022-06-09T23:09:40 | 2,945,007 | 385 | 215 | Apache-2.0 | 2022-11-13T10:50:25 | 2011-12-09T03:43:50 | Python | UTF-8 | Python | false | false | 4,009 | py | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
# NetFlow export-format version numbers, as carried in the first two bytes
# of every export packet. Only v5 has a registered parser in this module.
NETFLOW_V1 = 0x01
NETFLOW_V5 = 0x05
NETFLOW_V6 = 0x06
NETFLOW_V7 = 0x07
NETFLOW_V8 = 0x08
NETFLOW_V9 = 0x09
class NetFlow(object):
    """Dispatching parser: inspects the 2-byte version field and hands the
    buffer to the parser class registered for that version."""

    _PACK_STR = '!H'
    _NETFLOW_VERSIONS = {}

    @staticmethod
    def register_netflow_version(version):
        """Class decorator that registers a parser class for *version*."""
        def _register_netflow_version(cls):
            NetFlow._NETFLOW_VERSIONS[version] = cls
            return cls
        return _register_netflow_version

    def __init__(self):
        super(NetFlow, self).__init__()

    @classmethod
    def parser(cls, buf):
        """Parse *buf*; returns None when the version has no registered parser."""
        (version,) = struct.unpack_from(cls._PACK_STR, buf)
        handler = cls._NETFLOW_VERSIONS.get(version)
        return handler.parser(buf) if handler else None
@NetFlow.register_netflow_version(NETFLOW_V5)
class NetFlowV5(object):
    """NetFlow v5 export packet: a 24-byte header followed by flow records."""

    _PACK_STR = '!HHIIIIBBH'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, version, count, sys_uptime, unix_secs,
                 unix_nsecs, flow_sequence, engine_type, engine_id,
                 sampling_interval, flows=None):
        # NOTE(review): `flows` is accepted but not stored here; parser()
        # attaches msg.flows after construction — confirm before relying on it.
        self.version = version
        self.count = count
        self.sys_uptime = sys_uptime
        self.unix_secs = unix_secs
        self.unix_nsecs = unix_nsecs
        self.flow_sequence = flow_sequence
        self.engine_type = engine_type
        self.engine_id = engine_id
        self.sampling_interval = sampling_interval

    @classmethod
    def parser(cls, buf):
        """Build a NetFlowV5 from *buf*, parsing every trailing flow record."""
        header = struct.unpack_from(cls._PACK_STR, buf)
        msg = cls(*header)
        msg.flows = []
        # Records start right after the fixed header, one every _MIN_LEN bytes.
        for offset in range(cls._MIN_LEN, len(buf), NetFlowV5Flow._MIN_LEN):
            msg.flows.append(NetFlowV5Flow.parser(buf, offset))
        return msg
class NetFlowV5Flow(object):
    """One fixed-size (48-byte) NetFlow v5 flow record."""

    _PACK_STR = '!IIIHHIIIIHHxBBBHHBB2x'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, srcaddr, dstaddr, nexthop, input_, output,
                 dpkts, doctets, first, last, srcport, dstport,
                 tcp_flags, prot, tos, src_as, dst_as, src_mask,
                 dst_mask):
        self.srcaddr = srcaddr
        self.dstaddr = dstaddr
        self.nexthop = nexthop
        self.input = input_
        self.output = output
        self.dpkts = dpkts
        self.doctets = doctets
        self.first = first
        self.last = last
        self.srcport = srcport
        self.dstport = dstport
        self.tcp_flags = tcp_flags
        self.prot = prot
        self.tos = tos
        self.src_as = src_as
        self.dst_as = dst_as
        self.src_mask = src_mask
        self.dst_mask = dst_mask

    @classmethod
    def parser(cls, buf, offset):
        """Unpack one record at *offset*; unpack order matches __init__'s
        parameter order, so the tuple can be splatted directly."""
        fields = struct.unpack_from(cls._PACK_STR, buf, offset)
        return cls(*fields)
| [
"[email protected]"
] | |
acfaee88fd5930d05e23e2b881fb591112e14a8a | abaa004b41f63aa489be12a6e4be8f92ef2ef6d3 | /mcred/mcred/wsgi.py | 26961adf6a84faf2e27608df4342bc50a52c5ba2 | [] | no_license | vshaladhav97/django_practise_projects | 30dcc8dd909626c1d624d9c5895fc90ad55c79d0 | 83455c50e2ee910f03db47fbe1420d1cbd7eb292 | refs/heads/master | 2023-03-28T14:08:08.244694 | 2021-03-26T03:56:56 | 2021-03-26T03:56:56 | 351,655,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for mcred project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before creating the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mcred.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"[email protected]"
] | |
22c15be1586d632b333fa96826a4638948b75d8e | 8a102033a266d39128e4b64aa0780cf67055e196 | /15552.py | 0bfab577f1d44f67f2be860eabace0e46000ab0d | [] | no_license | yuseungwoo/baekjoon | 4dec0798b8689b9378121b9d178713c9cf14a53f | 099031e2c4401e27edcdc05bd6c9e6a558b09bb9 | refs/heads/master | 2020-09-03T15:25:40.764723 | 2018-10-08T02:35:27 | 2018-10-08T02:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # coding-utf-8
import sys


def main():
    """Read T, then T lines of integers; print each line's sum (fast I/O).

    Uses sys.stdin/sys.stdout directly because the input can be large
    (Baekjoon 15552 is a fast-I/O exercise).
    """
    read = sys.stdin.readline
    write = sys.stdout.write

    count = int(read())
    for _ in range(count):
        # int() tolerates the trailing newline, so no rstrip() is needed;
        # sum() accepts the map iterator directly — no list() materialization.
        write(str(sum(map(int, read().split()))) + '\n')


# Guard the entry point so importing this module has no side effects
# (previously the script ran, and blocked on stdin, at import time).
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
92f1518267d637703c7a7e2205d182907358658a | a35d07b11f013a26901942f730d4b720f4e27355 | /warmup1/near_hundred.py | 5cb06a22dcabd67a5cd1a9ba4cf2f360c62fd633 | [] | no_license | PMiskew/codingbat_solutions_python | 7cbbf293fb6b230e274a8cee373a2222a5a27e8d | 6e62fd0080c2a9bcd59fd4f803cc7966a2cb88d1 | refs/heads/master | 2022-11-13T13:24:53.078833 | 2020-07-14T18:38:06 | 2020-07-14T18:38:06 | 255,197,455 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | '''
QUESTION:
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
near_hundred(93) → True
near_hundred(90) → True
near_hundred(89) → False
'''
def near_hundred(n):
    """Return True if n is within 10 of 100 or within 10 of 200.

    near_hundred(93) -> True
    near_hundred(90) -> True
    near_hundred(89) -> False
    """
    # Both previous "approaches" were dead code (commented out as string
    # literals), so the function always returned None. This restores the
    # intended logic from the module docstring, using abs() so the value's
    # position relative to 100/200 does not matter.
    return abs(n - 100) <= 10 or abs(n - 200) <= 10
"[email protected]"
] | |
37cb5f11bdcd8f63dd000d2f706336c3c37ee0ec | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/sensu/sensu_go/tests/unit/modules/test_role_binding_info.py | 6dc780d18f9e93a04704512448a963a692889b1f | [
"GPL-3.0-only",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 1,977 | py | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.sensu.sensu_go.plugins.module_utils import (
errors, utils,
)
from ansible_collections.sensu.sensu_go.plugins.modules import role_binding_info
from .common.utils import (
AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args,
)
class TestRoleBindingInfo(ModuleTestCase):
    """Unit tests for the role_binding_info lookup module."""

    def test_get_all_role_bindings(self, mocker):
        mock_get = mocker.patch.object(utils, "get")
        mock_get.return_value = [1, 2, 3]
        set_module_args(namespace="my")

        with pytest.raises(AnsibleExitJson) as exc:
            role_binding_info.main()

        _client, path = mock_get.call_args[0]
        assert path == "/api/core/v2/namespaces/my/rolebindings"
        assert exc.value.args[0]["objects"] == [1, 2, 3]

    def test_get_single_role_binding(self, mocker):
        mock_get = mocker.patch.object(utils, "get")
        mock_get.return_value = 1
        set_module_args(name="test-role-binding")

        with pytest.raises(AnsibleExitJson) as exc:
            role_binding_info.main()

        _client, path = mock_get.call_args[0]
        assert path == "/api/core/v2/namespaces/default/rolebindings/test-role-binding"
        assert exc.value.args[0]["objects"] == [1]

    def test_missing_single_role_binding(self, mocker):
        # A missing binding is not an error: the module exits with an empty list.
        mock_get = mocker.patch.object(utils, "get")
        mock_get.return_value = None
        set_module_args(name="sample-role-binding")

        with pytest.raises(AnsibleExitJson) as exc:
            role_binding_info.main()

        assert exc.value.args[0]["objects"] == []

    def test_failure(self, mocker):
        # Backend errors must surface as a module failure.
        mock_get = mocker.patch.object(utils, "get")
        mock_get.side_effect = errors.Error("Bad error")
        set_module_args(name="sample-role-binding")

        with pytest.raises(AnsibleFailJson):
            role_binding_info.main()
| [
"[email protected]"
] | |
8c0b7a66053ff8a78c350d3e918291d75673b78a | b11d97bf5731bf6faeef14814292d1aff6866e3a | /seq2annotation/server/tensorflow_inference.py | 9d6ce6042c4c74192440f99d7b707623aee40e82 | [
"Apache-2.0"
] | permissive | tyc1922/seq2annotation | 2e2193aff1281242c2b66da8cbe27571e2c7f3fc | c161099570be544881c14105f4392d764d6d8247 | refs/heads/master | 2022-04-21T19:14:03.117606 | 2020-04-25T09:24:02 | 2020-04-25T09:24:02 | 259,069,353 | 1 | 0 | Apache-2.0 | 2020-04-26T15:46:48 | 2020-04-26T15:46:48 | null | UTF-8 | Python | false | false | 2,370 | py | from typing import List
import keras
from tokenizer_tools.tagset.NER.BILUO import BILUOSequenceEncoderDecoder
from tokenizer_tools.tagset.offset.sequence import Sequence
from tensorflow.contrib import predictor
from tokenizer_tools.tagset.exceptions import TagSetDecodeError
decoder = BILUOSequenceEncoderDecoder()
class Inference(object):
    """Wraps a TensorFlow SavedModel for sequence tagging and decodes the
    predicted BILUO tag sequences into offset-based annotations."""

    def __init__(self, model_path):
        # load model
        self.model_dir = model_path
        self.predict_fn = predictor.from_saved_model(model_path)

    def infer(self, input_text: str):
        """Tag a single string; returns one (text, Sequence, tags, failed) tuple."""
        infer_result = self._infer(input_text)
        return infer_result[0]

    def batch_infer(self, input_text: List[str]):
        """Tag a list of strings; returns a list of result tuples."""
        return self._infer(input_text)

    def _infer(self, input_text):
        # Accept either a single string or a list of strings.
        if isinstance(input_text, str):
            input_list = [input_text]
        else:
            input_list = input_text

        # One character per token; padding uses the '<pad>' marker.
        raw_sequences = [[i for i in text] for text in input_list]

        sentence = keras.preprocessing.sequence.pad_sequences(
            raw_sequences, dtype='object',
            padding='post', truncating='post', value=['<pad>']
        ).tolist()

        # TODO: batch infer will cause padding, which will maybe cause decoder to offset bug.
        # TODO: feature translate should out of this main program for better compatible with keras and estimator model
        # words_len uses the UNPADDED lengths so the model can mask padding.
        input_feature = {
            'words': [[i for i in text] for text in sentence],
            'words_len': [len(text) for text in raw_sequences],
        }
        # print(input_feature)

        predictions = self.predict_fn(input_feature)
        tags_list = predictions['tags']

        infer_result = []

        for raw_input_text, raw_text, normalized_text, tags in zip(input_list, raw_sequences, sentence, tags_list):
            # decode Unicode
            tags_seq = [i.decode() for i in tags]

            # print(tags_seq)

            # BILUO to offset
            failed = False
            try:
                seq = decoder.to_offset(tags_seq, raw_text)
            except TagSetDecodeError as e:
                print(e)

                # invalid tag sequence will raise exception
                # so return a empty result
                seq = Sequence(input_text)
                failed = True

            infer_result.append((raw_input_text, seq, tags_seq, failed))

        return infer_result
| [
"[email protected]"
] | |
90060297c37f8438877900ed28743d74da252c12 | 70e77b4e49fa1be07a89aa9370aa8069f4dd17cc | /imb_manager/asgi.py | d88f76b423017178dd9fba08e2652cdcf0103e46 | [] | no_license | rosoba/imb_manager | 7a542da0fb032839dcabd3a7d9073f69616cfaeb | 13f277cb5170ef17deebb2e4305c99f73421e2a2 | refs/heads/master | 2023-01-31T04:16:56.526477 | 2020-12-12T14:52:03 | 2020-12-12T14:52:03 | 320,850,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for imb_manager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'imb_manager.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
b1ad6df8c1fd9c67554b4f8f8f19ab2cc90e7283 | 5dd190725aaaeb7287d935b3c99c20480b208816 | /official/vision/keras_cv/metrics/iou.py | b6391a61c7d3cbf38407a26b17b068ba77b3fb66 | [
"Apache-2.0",
"MIT"
] | permissive | DemonDamon/mask-detection-based-on-tf2odapi | 32d947164fb54395b9e45368c0d4bcf3a6ea1c28 | 192ae544169c1230c21141c033800aa1bd94e9b6 | refs/heads/main | 2023-05-13T05:05:44.534885 | 2021-06-08T05:56:09 | 2021-06-08T05:56:09 | 369,463,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,723 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IOU Metrics used for semantic segmentation models."""
import numpy as np
import tensorflow as tf
class PerClassIoU(tf.keras.metrics.Metric):
  """Computes the per-class Intersection-Over-Union metric.

  Mean Intersection-Over-Union is a common evaluation metric for semantic image
  segmentation, which first computes the IOU for each semantic class.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `sample_weight` and the metric is then calculated from it.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Example:

  >>> # cm = [[1, 1],
  >>> #        [1, 1]]
  >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
  >>> # iou = true_positives / (sum_row + sum_col - true_positives))
  >>> # result = [(1 / (2 + 2 - 1), 1 / (2 + 2 - 1)] = 0.33
  >>> m = tf.keras.metrics.MeanIoU(num_classes=2)
  >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
  >>> m.result().numpy()
  [0.33333334, 0.33333334]
  """

  def __init__(self, num_classes, name=None, dtype=None):
    """Initializes `PerClassIoU`.

    Args:
      num_classes: The possible number of labels the prediction task can have.
        This value must be provided, since a confusion matrix of dimension =
        [num_classes, num_classes] will be allocated.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(PerClassIoU, self).__init__(name=name, dtype=dtype)
    self.num_classes = num_classes

    # Variable to accumulate the predictions in the confusion matrix.
    self.total_cm = self.add_weight(
        'total_confusion_matrix',
        shape=(num_classes, num_classes),
        initializer=tf.compat.v1.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      IOU per class.
    """

    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)

    # Flatten the input if its rank > 1.
    if y_pred.shape.ndims > 1:
      y_pred = tf.reshape(y_pred, [-1])

    if y_true.shape.ndims > 1:
      y_true = tf.reshape(y_true, [-1])

    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, self._dtype)
      if sample_weight.shape.ndims > 1:
        sample_weight = tf.reshape(sample_weight, [-1])

    # Accumulate the prediction to current confusion matrix.
    current_cm = tf.math.confusion_matrix(
        y_true,
        y_pred,
        self.num_classes,
        weights=sample_weight,
        dtype=self._dtype)
    return self.total_cm.assign_add(current_cm)

  def result(self):
    """Compute the mean intersection-over-union via the confusion matrix."""
    sum_over_row = tf.cast(
        tf.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
    sum_over_col = tf.cast(
        tf.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
    true_positives = tf.cast(
        tf.linalg.tensor_diag_part(self.total_cm), dtype=self._dtype)

    # sum_over_row + sum_over_col =
    #     2 * true_positives + false_positives + false_negatives.
    denominator = sum_over_row + sum_over_col - true_positives

    # divide_no_nan yields 0 for classes never seen (denominator == 0).
    return tf.math.divide_no_nan(true_positives, denominator)

  def reset_states(self):
    # Zero the accumulated confusion matrix between evaluation runs.
    tf.keras.backend.set_value(
        self.total_cm, np.zeros((self.num_classes, self.num_classes)))

  def get_config(self):
    # Serialize num_classes alongside the base Metric config so the
    # metric can be re-created from config.
    config = {'num_classes': self.num_classes}
    base_config = super(PerClassIoU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| [
"[email protected]"
] | |
3262a9aae0eca54f26f482a58a5c1b4c27d466ef | f39439548beba34b26f2e0cb40d9bcdfc5c85c71 | /runtag/bootcamp.py | ee077f8e2a9e0ac5ddee6ac10f77e65dfb76a5a6 | [] | no_license | willook/ape-x2 | 0a7b813c59efc572b3a5b0c3b63d738bbec2a8e1 | b299e75d20746f4d83ee7227fad9d8d3ef21a192 | refs/heads/master | 2023-02-21T15:41:27.241782 | 2021-01-21T06:37:01 | 2021-01-21T06:37:01 | 331,537,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | from .entities import Commander, Subordinate, Squad
class Bootcamp:
    """Builds squads on a shared grid: one commander plus N subordinates."""

    def __init__(self, grid):
        self.grid = grid

    def recruit(self, name, number_of_subordinates=1):
        """Create a named squad led by a Commander, with the requested
        number of Subordinates (identified 0..N-1). Returns the squad."""
        squad = Squad(self.grid, name)
        squad.assign(Commander(self.grid))
        for ident in range(number_of_subordinates):
            squad.assign(Subordinate(self.grid, identifier=ident))
        return squad
"[email protected]"
] | |
045edd1c218f527ab6ff454da5507798a547fdd8 | 32cb84dd41e4be24c065bb205f226f9b121a6db2 | /cconf/migrations/0001_initial.py | f89cbce98d36e32d7722b4cebcb2e3294a10711b | [] | no_license | InformatykaNaStart/staszic-sio2 | b38fda84bd8908472edb2097774838ceed08fcfa | 60a127e687ef8216d2ba53f9f03cfaa201c59e26 | refs/heads/master | 2022-06-29T11:09:28.765166 | 2022-06-13T21:56:19 | 2022-06-13T21:56:19 | 115,637,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2019-10-05 08:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the per-contest
    CConfiguration model (compiler choice plus C/C++ flags)."""

    initial = True

    dependencies = [
        ('contests', '0005_submission_auto_rejudges'),
    ]

    operations = [
        migrations.CreateModel(
            name='CConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('compiler', models.CharField(choices=[(b'GCC 4.6 (32 bits)', b'compiler-gcc.4_6_3'), (b'GCC 4.8 (32 bits)', b'compiler-gcc.4_8_2'), (b'GCC 8.3 (32 bits)', b'compiler-gcc.8_3_0-i386'), (b'GCC 8.3 (64 bits)', b'compiler-gcc.8_3_0-amd64')], default=b'compiler-gcc.4_8_2', max_length=128)),
                # NOTE(review): b'-std=gnuc99' looks like a typo for
                # b'-std=gnu99' — fixing it requires a new migration, not an
                # edit here, since this migration may already be applied.
                ('cflags', models.CharField(default=b'-std=gnuc99 -static -O2 -s -lm', max_length=256)),
                ('cxxflags', models.CharField(default=b'-std=c++11 -static -O2 -s -lm', max_length=256)),
                ('contest', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='contests.Contest')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
de0b0b059a80c07749a16ea129918524290a5f28 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/David.Liu/q3.py | a26e4881ffe0696d7c67a6a0d0631ed73764fc4d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 866 | py | import math
# Search-depth parameter consumed by f() below (string length is driven by n).
n=32
# Countdown: number of qualifying values still to find before the script stops.
x=500
# Output file is opened at import time and closed inside f() when x reaches 0.
outf=open("q3large.out","w")
def factor(num):
    """Return the smallest divisor of num in [2, 500), or -1 if none."""
    for candidate in range(2, 500):
        if num % candidate == 0:
            return candidate
    return -1
def makelist(n):
    """Interpret digit string n in every base 2..10 and collect a small
    divisor of each value. Stops early (returning a short list) as soon as
    one base's value has no divisor below 500."""
    divisors = []
    length = len(n)
    for base in range(2, 11):
        # Positional evaluation of the digit string in this base.
        value = 0
        for pos, digit in enumerate(n):
            value += int(digit) * base ** (length - 1 - pos)
        small = factor(value)
        if small == -1:
            break
        divisors.append(small)
    return divisors
def f(n, k):
    # Depth-first enumeration of binary prefixes: k is the prefix built so
    # far; at depth 0 the candidate k+"1" is tested.
    if n==0:
        l=makelist(k+"1")
        # The candidate qualifies when every base-2..10 interpretation has a
        # small divisor (all 9 bases produced one).
        if len(l)==9:
            outf.write(k+"1")
            for p in l:
                outf.write(" "+str(p))
            outf.write("\n")
            # Decrement the module-level countdown of results still needed.
            global x
            x=x-1
            print(x)
            if x==0:
                # Found enough results: close the output file and stop.
                outf.close()
                exit()
    else:
        # Branch on the next digit (0 or 1) and recurse.
        f(n-1, k+"0")
        f(n-1, k+"1")
f(n-2, "1") | [
"[[email protected]]"
] | |
4a1758e7ca32cd345aa0c5b376f92a5dc0a0b52f | 7996d7fefe2d3e5b4d53df4376d6fd8908407a1a | /authentication/urls.py | 139def747c67ed7664c5e93050e1419ada49d7e8 | [] | no_license | Imraj423/twitterclone | 2aa1446ef6e5dec6548f26c6254d478a696970ec | 0c3dfab5436de9095248305d3994dc77549e0b1e | refs/heads/master | 2021-01-15T02:07:06.684002 | 2020-04-04T23:47:39 | 2020-04-04T23:47:39 | 242,843,822 | 0 | 0 | null | 2020-03-07T04:02:21 | 2020-02-24T21:08:23 | Python | UTF-8 | Python | false | false | 229 | py | from django.urls import path
from . import views
# URL routes for the authentication app; the route names are used by
# {% url %} template tags and reverse() lookups.
urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('login/', views.login_view, name='login'),
    path('logout/', views.logoutUser, name='logout'),
]
| [
"[email protected]"
] | |
5ed6ab127cba5918dd12490bf579baafac9dc250 | 1fa262359f91768f1b98c45944fd4a63645f4567 | /variable_examples.py | 03bd0c75fcb7f7afb46ffb09f440a337d5d26ae4 | [] | no_license | feleHaile/20190225KAPL | d1a95dd6632ba83b6cd3380d92e2a2a18a5a4942 | 3957c1d738cc3e42d5dac0fb4a6f6071a1bb391a | refs/heads/master | 2020-05-15T22:31:51.881632 | 2019-02-28T20:11:29 | 2019-02-28T20:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | #!/usr/bin/env python
# Teaching script: demonstrates name binding, aliasing of mutable objects,
# identity vs equality, and dynamic typing.
x = 5
print(x)
y = x
things = [1, 2, 3]
t = things  # t and things are two names bound to the SAME list object
print(t)
t.append(42)
print(things)  # the mutation through t is visible through things
print(t is things)  # identity check: True, same object
print(id(t), id(things))  # identical ids confirm the aliasing
print(type(x), type(t), type(type), type('spam'))
t = 42  # rebinding t does not affect things
print(type(t))
t = "amazon"  # a name can be rebound to any type
print(type(t))
m = None
print(m)
| [
"[email protected]"
] | |
a8cfde36a731a0cfeb460159e2cc73d43db7c46e | 101d866f8e2f84dc8f76181341180c13b38e0ecf | /case/Demo/test_global_init.py | 96c3320be20e782b490bbf14bbb0cf12cef8b2c5 | [] | no_license | cming091/autotest | 1d9a6f5f750c04b043a6bc45efa423f2e730b3aa | 0f6fe31a27de9bcf0697c28574b97555fe36d1e1 | refs/heads/master | 2023-06-02T18:22:24.971786 | 2021-06-21T08:52:47 | 2021-06-21T08:52:47 | 378,858,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # coding=utf-8
import pytest
import allure
from case.base import TestBase
@allure.feature('测试初始化全局数据')
@allure.link(url="https://pages/viewpage.action?pageId=84191585", name="测试用例")
@pytest.mark.usefixtures("init_module_data")
@pytest.mark.usefixtures("init_global_data")
class TestGlobalDataInit(TestBase):
    """test global data init"""

    @allure.story("1.第一步")
    def test_step_one(self, request):
        print('test step one...done')

    def test_step_two(self, request):
        print('test step two...done')
        # NOTE(review): this assertion always fails — presumably a deliberate
        # demonstration of a failing step; confirm before "fixing" it.
        assert 1 == 2

    def test_step_three(self, request):
        print('test step three... done')
| [
"[email protected]"
] | |
54439c245d7fae5f35ec6680b74a2d298e21bec7 | 38422c3edeb269926502fed31a0761aff8dd3d3b | /Zeiss_spectrometer/Zeiss_spectrometer_Python3_v191002/Calib_Zeiss_spectrometer_GUI_v5.py | a35fdd8d0e04f888df7c292ff847d4857b865b8a | [] | no_license | vfurtula/Alle-projekter | 2dab3ccbf7ddb6be3ee09f9f5e87085f354dd84a | da3d7c9611088043e2aea5d844f1ae6056215e04 | refs/heads/master | 2022-06-07T05:17:35.327228 | 2020-04-30T10:28:48 | 2020-04-30T10:28:48 | 260,180,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,654 | py | import os, sys, imp, serial, time, numpy
import scipy.interpolate
#from numpy.polynomial import polynomial as P
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.exporters
#from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import QThread, QTimer, SIGNAL
import config_zeiss, ZaberXmcb_ascii
class zaber_Thread(QThread):
	def __init__(self, sender, Za, adjust_mode, calib_file, *argv):
		# sender: label of the GUI control that triggered the move (arrow
		#   buttons or 'Move rel'/'Move abs'/'Move to'/'Adjust').
		# Za: Zaber motor-controller wrapper (ZaberXmcb_ascii).
		# adjust_mode: "wavelength" (axis 1) or "slit" (axis 2).
		QThread.__init__(self)
		self.end_flag=False
		self.sender=sender
		self.Za=Za
		self.calib_mode=adjust_mode
		self.calib_file=calib_file
		if self.calib_mode=="wavelength":
			self.axs=1
		elif self.calib_mode=="slit":
			self.axs=2
		if argv:
			# Optional positional arg: the move target (steps or value).
			self.move_num=argv[0]
	def __del__(self):
		# Block until run() has finished before the thread object is destroyed.
		self.wait()
	def abort_move(self):
		# Issue a hardware stop on the active axis and flag run() to bail out.
		self.Za.set_Stop(1,self.axs)
		self.end_flag=True
def return_pos_if_stopped(self):
return self.Za.return_Position_When_Stopped(1,self.axs)
def update(self):
min_pos=self.return_pos_if_stopped()
if min_pos in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),min_pos)
return
pos_val=self.get_zeiss_value(min_pos)
if self.calib_mode=="wavelength":
self.replace_line("config_zeiss.py",4,''.join(["Last_position_lambda=[",str(min_pos),",",str(pos_val),"]\n"]))
elif self.calib_mode=="slit":
self.replace_line("config_zeiss.py",5,''.join(["Last_position_slit=[",str(min_pos),",",str(pos_val),"]\n"]))
imp.reload(config_zeiss)
more_tals_obj=type('more_tals_obj',(object,),{'min_pos':min_pos, 'pos_val':pos_val})
self.emit(SIGNAL('more_tals(PyQt_PyObject)'), more_tals_obj)
def replace_line(self,file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
def run(self):
if self.sender==u'\u25b2':
check=self.Za.move_Relative(1,self.axs,10)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
self.update()
elif self.sender==u'\u25bc':
check=self.Za.move_Relative(1,self.axs,-10)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
self.update()
elif self.sender=='Move rel':
check=self.Za.move_Relative(1,self.axs,self.move_num)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
self.update()
elif self.sender=='Move abs':
check=self.Za.move_Absolute(1,self.axs,self.move_num)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
self.update()
elif 'Move to' in self.sender or 'Adjust' in self.sender:
position=self.get_pos(self.move_num)
check=self.Za.move_Absolute(1,self.axs,position)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
self.update()
elif self.sender=='-> nm':
min_pos=self.get_pos(self.move_num)
check=self.Za.set_Current_Position(1,self.axs,min_pos)
if check in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),check)
return
more_tals_obj=type('more_tals_obj',(object,),{'min_pos':min_pos, 'pos_val':self.move_num})
self.emit(SIGNAL('more_tals(PyQt_PyObject)'), more_tals_obj)
self.replace_line("config_zeiss.py",4,''.join(["Last_position_lambda=[",str(min_pos),",",str(self.move_num),"]\n"]))
imp.reload(config_zeiss)
elif self.sender=='-> mm':
min_pos=self.get_pos(self.move_num)
check=self.Za.set_Current_Position(1,self.axs,min_pos)
if min_pos in [serial.SerialException, ValueError]:
self.emit(SIGNAL('bad_zaber_val(PyQt_PyObject)'),min_pos)
return
more_tals_obj=type('more_tals_obj',(object,),{'min_pos':min_pos, 'pos_val':self.move_num})
self.emit(SIGNAL('more_tals(PyQt_PyObject)'), more_tals_obj)
self.replace_line("config_zeiss.py",5,''.join(["Last_position_slit=[",str(min_pos),",",str(self.move_num),"]\n"]))
imp.reload(config_zeiss)
else:
pass
def get_pos(self,nm):
x=[]
y=[]
with open(self.calib_file, 'r') as thefile:
for line in thefile:
columns = line.split()
x.extend([int(columns[0])]) #microstep pos.
if self.calib_mode=="wavelength":
y.extend([round(float(columns[1]),1)]) #wavelength
elif self.calib_mode=="slit":
y.extend([round(float(columns[1]),2)]) #slit
if numpy.min(nm)>=min(y) and numpy.max(nm)<=max(y):
#spline
pos_curve=scipy.interpolate.splrep(y, x, k=3, s=0)
#linear
#wv_curve = scipy.interpolate.splrep(x, y, k=1, s=0)
pos_pos = scipy.interpolate.splev(nm, pos_curve, der=0)
nums=numpy.rint(pos_pos) # round the up/down floats
return nums.astype(int)
def get_zeiss_value(self,pos):
x=[]
y=[]
with open(self.calib_file, 'r') as thefile:
for line in thefile:
columns = line.split()
x.extend([int(columns[0])]) #microstep pos.
if self.calib_mode=="wavelength":
y.extend([round(float(columns[1]),1)]) #wavelength
elif self.calib_mode=="slit":
y.extend([round(float(columns[1]),2)]) #slit
if numpy.min(pos)>=min(x) and numpy.max(pos)<=max(x):
#spline
wv_curve=scipy.interpolate.splrep(x, y, k=3, s=0)
#linear
#wv_curve = scipy.interpolate.splrep(x, y, k=1, s=0)
pos = scipy.interpolate.splev(pos, wv_curve, der=0)
if self.calib_mode=="wavelength":
return numpy.round(pos,1)
elif self.calib_mode=="slit":
return numpy.round(pos,2)
class Run_gui(QtGui.QDialog):
	def __init__(self, MyBar, parent=None):
		"""Build the calibration dialog.

		MyBar  -- existing QMenuBar instance the dialog adds its menus to.
		parent -- optional Qt parent widget.

		All default settings (serial ports, calib file names, valid stepper
		ranges, last stored positions) come from the config_zeiss module.
		"""
		QtGui.QDialog.__init__(self, parent)
		#super(Run_gui, self).__init__()
		#####################################################
		# constants
		self.Validrange_lambda = config_zeiss.Validrange_lambda
		self.Validrange_slit = config_zeiss.Validrange_slit
		self.Range_lambda = config_zeiss.Range_lambda
		self.Range_slit = config_zeiss.Range_slit
		self.calibfile_lambda_str = config_zeiss.calibfile_lambda
		self.calibfile_slit_str = config_zeiss.calibfile_slit
		self.lambda_str=config_zeiss.lambdafile
		self.slit_str=config_zeiss.slitfile
		self.timestr = config_zeiss.timestr
		self.filename_str = config_zeiss.filename
		self.folder_str = config_zeiss.foldername
		self.zaberport_str = config_zeiss.zaberport
		self.sr510port_str = config_zeiss.sr510port
		# history of stepper micropositions; starts at the last saved lambda pos
		self.all_pos=[config_zeiss.Last_position_lambda[0]]
		self.MyBar=MyBar
		self.initUI()
	def initUI(self):
		"""Create all menus, widgets, layouts, plots and signal connections.

		Runs once from __init__; ends by disabling the controls until a
		serial connection is made and loading the wavelength calibration.
		"""
		self.infoCalibButton = QtGui.QPushButton('Calib files info',self)
		################### MENU BARS START ##################
		#MyBar = QtGui.QMenuBar(self)
		fileMenu = self.MyBar.addMenu("File")
		fileSavePlt = fileMenu.addAction("Save calib plot")
		fileSavePlt.triggered.connect(self.set_save_plots)
		fileSavePlt.setShortcut('Ctrl+P')
		fileSaveSet = fileMenu.addAction("Save settings")
		fileSaveSet.triggered.connect(self.set_save) # triggers closeEvent()
		fileSaveSet.setShortcut('Ctrl+S')
		fileClose = fileMenu.addAction("Close")
		fileClose.triggered.connect(self.close) # triggers closeEvent()
		fileClose.setShortcut('Ctrl+X')
		modeMenu = self.MyBar.addMenu("Mode")
		self.conMode = modeMenu.addAction("Connect to serial")
		self.conMode.triggered.connect(self.set_connect)
		self.disconMode = modeMenu.addAction("Disconnect from serial")
		self.disconMode.triggered.connect(self.set_disconnect)
		serialMenu = self.MyBar.addMenu("Serial")
		self.serialZaber = serialMenu.addAction("Zaber stepper")
		self.serialZaber.triggered.connect(self.ZaberDialog)
		calibMenu = self.MyBar.addMenu("Calib")
		self.calibWaveZeiss = calibMenu.addAction("Load ref lambda calib file")
		self.waveZeiss = calibMenu.addAction("Load wavelength file")
		self.waveZeiss.setShortcut('Ctrl+W')
		self.calibWaveZeiss.triggered.connect(self.loadCalibLambdaDialog)
		self.waveZeiss.triggered.connect(self.loadWaveDialog)
		self.calibSlitZeiss = calibMenu.addAction("Load ref slit calib file")
		self.slitZeiss = calibMenu.addAction("Load slit width file")
		self.slitZeiss.setShortcut('Ctrl+Q')
		self.calibSlitZeiss.triggered.connect(self.loadCalibSlitDialog)
		self.slitZeiss.triggered.connect(self.loadSlitDialog)
		################### MENU BARS END ##################
		# combo box selecting what is being calibrated (wavelength axis or slit axis)
		lb3 = QtGui.QLabel("CALIBRATE:",self)
		lb3.setStyleSheet("color: blue")
		self.cb3 = QtGui.QComboBox(self)
		mylist3=["wavelength","slit"]
		self.cb3.addItems(mylist3)
		self.cb3.setCurrentIndex(mylist3.index("wavelength"))
		#self.cb3.setEnabled(True)
		##############################################
		filename = QtGui.QLabel("File name",self)
		foldername = QtGui.QLabel("Folder name",self)
		self.filenameEdit = QtGui.QLineEdit(self.filename_str,self)
		self.folderEdit = QtGui.QLineEdit(self.folder_str,self)
		##############################################
		# status info which button has been pressed
		self.motorstep_lbl = QtGui.QLabel("ZABER stepper postion:", self)
		self.motorstep_lbl.setStyleSheet("color: blue")
		# jog arrows (unicode up/down triangles)
		self.upButton = QtGui.QPushButton(u'\u25b2',self)
		self.set_bstyle_v1(self.upButton)
		self.downButton = QtGui.QPushButton(u'\u25bc',self)
		self.set_bstyle_v1(self.downButton)
		self.moverelButton = QtGui.QPushButton('Move rel',self)
		self.moveabsButton = QtGui.QPushButton('Move abs',self)
		#self.moveabsButton.setStyleSheet('QPushButton {color: red}')
		self.moverelEdit = QtGui.QLineEdit(str(100),self)
		self.moveabsEdit = QtGui.QLineEdit(str(10000),self)
		self.moveButton = QtGui.QPushButton('Move to nm',self)
		self.moveButton.setStyleSheet('QPushButton {color: magenta}')
		self.moveEdit = QtGui.QLineEdit("",self)
		self.stopButton = QtGui.QPushButton('STOP MOVE',self)
		##############################################
		# status info which button has been pressed
		self.zeiss_lbl = QtGui.QLabel("ZEISS alignment:", self)
		self.zeiss_lbl.setStyleSheet("color: blue")
		self.alignEdit = QtGui.QLineEdit("",self)
		self.setzeroButton = QtGui.QPushButton('-> nm',self)
		self.setzeroButton.setStyleSheet('QPushButton {color: magenta}')
		self.setzeroButton.setFixedWidth(70)
		#self.setzeroButton.setStyleSheet('QPushButton {color: black}')
		##############################################
		# status info which button has been pressed
		self.zeiss_cal_lbl = QtGui.QLabel("ZEISS calibration:", self)
		self.zeiss_cal_lbl.setStyleSheet("color: blue")
		# texts for these two buttons are set later by loadWaveCalibValues/update_calib_button
		self.calibButton = QtGui.QPushButton('',self)
		self.calibButton.setStyleSheet('QPushButton {color: magenta}')
		self.calibSaveButton = QtGui.QPushButton('',self)
		#self.reorderButton = QtGui.QPushButton('Reorder',self)
		#self.reorderButton.setFixedWidth(65)
		#self.reorderButton.setEnabled(False)
		##############################################
		self.timetrace_str = QtGui.QLabel("TIME trace for storing plots and data:", self)
		self.timetrace_str.setStyleSheet("color: blue")
		##############################################
		# lcd1: stepper microposition, lcd2: physical value, lcd3: time trace
		self.lcd1 = QtGui.QLCDNumber(self)
		self.lcd1.setStyleSheet("color: black")
		self.lcd1.setSegmentStyle(QtGui.QLCDNumber.Flat)
		self.lcd1.setNumDigits(6)
		self.lcd2 = QtGui.QLCDNumber(self)
		self.lcd2.setStyleSheet("color: magenta")
		self.lcd2.setSegmentStyle(QtGui.QLCDNumber.Flat)
		self.lcd2.setNumDigits(6)
		#self.lcd2.setFixedWidth(120)
		self.lcd3 = QtGui.QLCDNumber(self)
		self.lcd3.setStyleSheet("color: red")
		self.lcd3.setSegmentStyle(QtGui.QLCDNumber.Flat)
		self.lcd3.setNumDigits(11)
		self.lcd3.setFixedHeight(60)
		self.lcd3.display(self.timestr)
		##############################################
		# ---- layout assembly (splitters grouped column by column) ----
		#g4_0=QtGui.QGridLayout()
		#g4_0.addWidget(MyBar,0,0)
		v4 = QtGui.QVBoxLayout()
		#v4.addLayout(g4_0)
		v4.addWidget(self.infoCalibButton)
		g2_0 = QtGui.QSplitter(QtCore.Qt.Horizontal)
		g2_0.addWidget(lb3)
		g2_0.addWidget(self.cb3)
		g8_1 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g8_1.addWidget(filename)
		g8_1.addWidget(foldername)
		g8_2 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g8_2.addWidget(self.filenameEdit)
		g8_2.addWidget(self.folderEdit)
		g8_3 = QtGui.QSplitter(QtCore.Qt.Horizontal)
		g8_3.addWidget(g8_1)
		g8_3.addWidget(g8_2)
		v8 = QtGui.QSplitter(QtCore.Qt.Vertical)
		v8.addWidget(g2_0)
		v8.addWidget(g8_3)
		g0_0 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_0.addWidget(self.motorstep_lbl)
		g0_1 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_1.addWidget(self.upButton)
		g0_1.addWidget(self.downButton)
		g0_2 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_2.addWidget(self.lcd1)
		h0 = QtGui.QSplitter(QtCore.Qt.Horizontal)
		h0.addWidget(g0_1)
		h0.addWidget(g0_2)
		g0_3=QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_3.addWidget(self.moverelButton)
		g0_3.addWidget(self.moveabsButton)
		g0_3.addWidget(self.moveButton)
		g0_4=QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_4.addWidget(self.moverelEdit)
		g0_4.addWidget(self.moveabsEdit)
		g0_4.addWidget(self.moveEdit)
		g0_5=QtGui.QSplitter(QtCore.Qt.Vertical)
		g0_5.addWidget(self.stopButton)
		h1 = QtGui.QSplitter(QtCore.Qt.Horizontal)
		h1.addWidget(g0_5)
		h1.addWidget(g0_3)
		h1.addWidget(g0_4)
		v1 = QtGui.QSplitter(QtCore.Qt.Vertical)
		v1.addWidget(g0_0)
		v1.addWidget(h0)
		v1.addWidget(h1)
		g3_0=QtGui.QSplitter(QtCore.Qt.Vertical)
		g3_0.addWidget(self.zeiss_lbl)
		g3_1=QtGui.QSplitter(QtCore.Qt.Vertical)
		g3_1.addWidget(self.alignEdit)
		g3_1.addWidget(self.setzeroButton)
		g3_2=QtGui.QSplitter(QtCore.Qt.Horizontal)
		g3_2.addWidget(g3_1)
		g3_2.addWidget(self.lcd2)
		h4 = QtGui.QSplitter(QtCore.Qt.Vertical)
		h4.addWidget(g3_0)
		h4.addWidget(g3_2)
		g5_0=QtGui.QSplitter(QtCore.Qt.Vertical)
		g5_0.addWidget(self.zeiss_cal_lbl)
		g5_1=QtGui.QSplitter(QtCore.Qt.Horizontal)
		g5_1.addWidget(self.calibButton)
		g5_1.addWidget(self.calibSaveButton)
		h5 = QtGui.QSplitter(QtCore.Qt.Vertical)
		h5.addWidget(g5_0)
		h5.addWidget(g5_1)
		g9_0 = QtGui.QSplitter(QtCore.Qt.Vertical)
		g9_0.addWidget(self.timetrace_str)
		g9_0.addWidget(self.lcd3)
		# SET ALL VERTICAL COLUMNS TOGETHER
		v_all = QtGui.QVBoxLayout()
		v_all.addLayout(v4)
		v_all.addWidget(v8)
		v_all.addWidget(h4)
		v_all.addWidget(h5)
		v_all.addWidget(v1)
		v_all.addWidget(g9_0)
		# set graph and toolbar to a new vertical group vcan
		# two pyqtgraph plots: reference calib curve (left) and new calib points (right)
		pw = pg.GraphicsLayoutWidget()
		pw.setFixedWidth(750)
		self.p0 = pw.addPlot()
		self.p0.setTitle('Ref calib file')
		self.p0.setLabel('left', u'\u03bb', units='m')
		self.p0.setLabel('bottom', 'stepper micropos.')
		self.p1 = pw.addPlot()
		self.p1.setTitle('New calib file')
		self.p1.setLabel('left', u'\u03bb', units='m')
		self.p1.setLabel('bottom', 'stepper micropos.')
		# SET ALL VERTICAL COLUMNS TOGETHER
		h_all = QtGui.QHBoxLayout()
		h_all.addLayout(v_all)
		h_all.addWidget(pw)
		self.setLayout(h_all)
		########################################
		# create plot and add it to the figure canvas
		self.p0.addLegend()
		#self.curve0=self.p0.plot(pen='r')
		self.curve0_1=self.p0.plot(pen='m',name='raw data')
		self.curve0_2=self.p0.plot(pen='b',name='spline')
		#self.p0.setDownsampling(mode='peak')
		#self.p0.setClipToView(True)
		# PLOT 3 settings
		self.p1.addLegend()
		self.curve2=self.p1.plot(pen='m',name='raw data')
		#self.curve1=self.p1.plot(pen='b',name='spline')
		#self.curve3=self.p1.plot(pen='w',name='scan')
		#########################################
		# button wiring; calibButton both moves the stepper and advances the set-point
		self.calibSaveButton.clicked.connect(self.add_calib)
		self.calibButton.clicked.connect(self.move_to_val2)
		self.calibButton.clicked.connect(self.update_calib_button)
		self.setzeroButton.clicked.connect(self.set_zero)
		self.cb3.activated[str].connect(self.onActivated3)
		#self.reorderButton.clicked.connect(self.set_reorder)
		self.upButton.clicked.connect(self.move_jog)
		self.downButton.clicked.connect(self.move_jog)
		self.moverelButton.clicked.connect(self.move_rel)
		self.moveabsButton.clicked.connect(self.move_abs)
		self.moveButton.clicked.connect(self.move_to_val)
		self.stopButton.clicked.connect(self.move_stop)
		self.infoCalibButton.clicked.connect(self.showInfoCalibFiles)
		#self.move(0,175)
		#self.setWindowTitle('Zeiss spectrometer calibration')
		self.show()
		# everything stays disabled until "Connect to serial" succeeds
		self.allButtons_torf(False)
		self.stopButton.setEnabled(False)
		self.bad_zaber_vals=False
		self.calib_mode="wavelength"
		self.set_lambda_calib_data()
		self.loadWaveCalibValues()
##########################################
def ZaberDialog(self):
text, ok = QtGui.QInputDialog.getText(self, 'Serial Port Dialog','Enter Zaber stepper serial:', text=self.zaberport_str)
if ok:
self.zaberport_str = str(text)
def loadCalibLambdaDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load ref wavelength calib','Calib_files')
old_calib=self.calibfile_lambda_str
if fname:
try:
self.calibfile_lambda_str = str(fname)
self.showInfoCalibFiles()
except ValueError as e:
self.calibfile_lambda_str = old_calib
QtGui.QMessageBox.warning(self, 'Message', "Something is wrong with lambda calib file! Do you have a file with 2 columns, no headers, and all inputs are digits?")
return
if self.calib_mode=="wavelength":
self.set_lambda_calib_data()
def loadWaveDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load wavelength vals for calib','Calib_files')
if fname:
self.lambda_str = str(fname)
if self.calib_mode=="wavelength":
self.loadWaveCalibValues()
def loadWaveCalibValues(self):
try:
self.y_local=[]
self.calibCounter=0
with open(self.lambda_str, 'r') as thefile:
for line in thefile:
columns = line.split()
self.y_local.extend([round(float(columns[0]),1)])
except ValueError as e:
QtGui.QMessageBox.warning(self, 'Message',"Something is wrong with the wavelength file! Do you have a wavelength file with 1 column, no headers, and all inputs are digits?")
self.calibButton.setText('Adjust to ---- nm')
self.calibButton.setEnabled(False)
self.calibSaveButton.setEnabled(False)
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' nm']))
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' nm' ]))
#self.calibCounter+=1
def loadCalibSlitDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load ref slit width calib','Calib_files')
old_calib=self.calibfile_slit_str
if fname:
try:
self.calibfile_slit_str = str(fname)
self.showInfoCalibFiles()
except ValueError as e:
self.calibfile_slit_str = old_calib
QtGui.QMessageBox.warning(self, 'Message', "Something is wrong with slit calib file! Do you have a file with 2 columns, no headers, and all inputs are digits?")
return
if self.calib_mode=="slit":
self.set_slit_calib_data()
def loadSlitDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load slit width vals for calib','Calib_files')
if fname:
self.slit_str = str(fname)
if self.calib_mode=="slit":
self.loadSlitCalibValues()
def loadSlitCalibValues(self):
try:
self.y_local=[]
self.calibCounter=0
with open(self.slit_str, 'r') as thefile:
for line in thefile:
columns = line.split()
self.y_local.extend([round(float(columns[0]),2)])
except Exception, e:
QtGui.QMessageBox.warning(self, 'Message',"Something is wrong with the slit width file! Do you have a slit width file with 1 column, no headers, and all inputs are digits?")
self.calibButton.setText('Adjust to ---- mm')
self.calibButton.setEnabled(False)
self.calibSaveButton.setEnabled(False)
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' mm']))
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' mm' ]))
#self.calibCounter+=1
def showInfoCalibFiles(self):
head, tail1 = os.path.split(self.calibfile_lambda_str)
head, tail2 = os.path.split(self.calibfile_slit_str)
x0=[]
y0=[]
with open(self.calibfile_lambda_str, 'r') as thefile:
for line in thefile:
columns = line.split()
x0.extend([int(columns[0])]) #microstep pos.
y0.extend([round(float(columns[1]),1)]) #wavelength
x1=[]
y1=[]
with open(self.calibfile_slit_str, 'r') as thefile:
for line in thefile:
columns = line.split()
x1.extend([int(columns[0])]) #microstep pos.
y1.extend([round(float(columns[1]),2)]) #wavelength
QtGui.QMessageBox.information(self, "Drive files information",''.join(["<font color=\"black\">Calib lambda: </font> <font color=\"green\">",tail1,"< </font> <br> <font color=\"black\">Calib lambda range: </font> <font color=\"green\">",str(y0[0])," to ",str(y0[-1])," nm < </font> <br> <font color=\"black\">Calib slit:< </font> <font color=\"blue\">",tail2,"< </font> <br> <font color=\"black\">Calib slit range: </font> <font color=\"blue\">",str(y1[0])," to ",str(y1[-1])," mm <" ]))
	def set_connect(self):
		"""Open the Zaber serial port, configure both axes and enable the GUI.

		Configures position limits, hold/running currents and max speed for
		axis 1 (wavelength) and axis 2 (slit), restores the last stored
		positions from config_zeiss, and arms a 10-minute single-shot timer
		that auto-disconnects the hardware.
		"""
		try:
			self.Za = ZaberXmcb_ascii.Zaber_Xmcb(self.zaberport_str)
		except Exception as e:
			QtGui.QMessageBox.warning(self, 'Message',"No response from the Zaber serial port! Check the serial port name and connections.")
			return
		# constants
		min_pos_lambda=self.Validrange_lambda[0]
		max_pos_lambda=self.Validrange_lambda[1]
		min_pos_slit=self.Validrange_slit[0]
		max_pos_slit=self.Validrange_slit[1]
		# hc/rc are in controller units of 25 mA (see the hc*25e-3 print below)
		hc=25
		rc=50
		ms=2048
		try:
			self.Za.set_timeout(0.5)
			self.Za.set_Maximum_Position(1,1,max_pos_lambda)
			self.Za.set_Minimum_Position(1,1,min_pos_lambda)
			self.Za.set_Hold_Current(1,1,hc)
			self.Za.set_Running_Current(1,1,rc)
			self.Za.set_Max_Speed(1,1,ms)
			# Enable user to edit advanced settings
			#self.Za.set_System_Access(1,2) # OPEN ADVANCED
			#self.Za.set_Motor_Dir(1,2,1) # REVERSE motor direction
			#self.Za.set_System_Access(1,1) # CLOSE ADVANCED
			self.Za.set_Maximum_Position(1,2,max_pos_slit)
			self.Za.set_Minimum_Position(1,2,min_pos_slit)
			self.Za.set_Hold_Current(1,2,hc)
			self.Za.set_Running_Current(1,2,rc)
			self.Za.set_Max_Speed(1,2,ms)
			micstep=self.Za.return_Microstep_Resolution(1,1)
			sc=self.Za.return_System_Current(1)
			# TURN ON/OFF ALERTS
			if self.Za.return_Alert(1)==0:
				self.Za.set_Alert(1,1)
			# restore last known positions so no homing move is needed
			self.Za.set_Current_Position(1,1,config_zeiss.Last_position_lambda[0])
			self.Za.set_Current_Position(1,2,config_zeiss.Last_position_slit[0])
		except Exception as e:
			self.Za.close()
			QtGui.QMessageBox.warning(self, 'Message',"No response from the Zaber stepper! Is stepper powered and connected to the serial?")
			return None
		hc_str=''.join([str(hc*25e-3)," / ",str(rc*25e-3)," Amps" ])
		print "Hold / Running current:", hc_str
		sys_str=''.join([ str(sc), " Amps" ])
		print "System current (0-5):", sys_str
		ms_str=''.join([str(ms/1.6384), " microsteps/s"])
		print "Max speed:", ms_str
		micstep_str=''.join([str(micstep), " microsteps/step"])
		print "Microstep resolution:", str(micstep_str)
		pos_lambda_str=''.join([str(min_pos_lambda)," to ", str(max_pos_lambda)," microsteps"])
		print "Stepper range for the wavelengths:", pos_lambda_str
		pos_slit_str=''.join([str(min_pos_slit)," to ", str(max_pos_slit)," microsteps"])
		print "Stepper range for the slits:", pos_slit_str
		self.allButtons_torf(True)
		self.stopButton.setEnabled(False)
		self.conMode.setEnabled(False)
		self.serialZaber.setEnabled(False)
		if self.calib_mode=="wavelength":
			self.set_lambda_calib_data()
		elif self.calib_mode=="slit":
			self.set_slit_calib_data()
		# auto-disconnect after 10 minutes of no reconnect (1000*60*10 ms)
		self.timer = QTimer(self)
		self.connect(self.timer, SIGNAL("timeout()"), self.set_disconnect)
		self.timer.setSingleShot(True)
		self.timer.start(1000*60*10)
	def set_disconnect(self):
		"""Release both axes (zero hold current), close the port, disable the GUI."""
		self.Za.set_Hold_Current(1,1,0)
		self.Za.set_Hold_Current(1,2,0)
		self.Za.close()
		self.allButtons_torf(False)
		self.conMode.setEnabled(True)
def allButtons_torf(self,trueorfalse):
self.calibWaveZeiss.setEnabled(trueorfalse)
self.waveZeiss.setEnabled(trueorfalse)
self.calibSlitZeiss.setEnabled(trueorfalse)
self.slitZeiss.setEnabled(trueorfalse)
self.disconMode.setEnabled(trueorfalse)
self.calibButton.setEnabled(trueorfalse)
self.alignEdit.setEnabled(trueorfalse)
self.setzeroButton.setEnabled(trueorfalse)
self.calibSaveButton.setEnabled(trueorfalse)
self.cb3.setEnabled(trueorfalse)
self.infoCalibButton.setEnabled(trueorfalse)
#self.reorderButton.setEnabled(trueorfalse)
self.filenameEdit.setEnabled(trueorfalse)
self.folderEdit.setEnabled(trueorfalse)
self.upButton.setEnabled(trueorfalse)
self.downButton.setEnabled(trueorfalse)
self.moverelButton.setEnabled(trueorfalse)
self.moveabsButton.setEnabled(trueorfalse)
self.moveButton.setEnabled(trueorfalse)
self.moverelEdit.setEnabled(trueorfalse)
self.moveabsEdit.setEnabled(trueorfalse)
self.moveEdit.setEnabled(trueorfalse)
#self.stopButton.setEnabled(trueorfalse)
##########################################
def set_bstyle_v1(self,button):
button.setStyleSheet('QPushButton {font-size: 25pt}')
button.setFixedWidth(40)
button.setFixedHeight(40)
def onActivated3(self, text):
if str(text)=="wavelength":
reply = QtGui.QMessageBox.question(self, 'Message', "Do you want to calibrate wavelengths stepper positions?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.calib_mode="wavelength"
self.p0.setLabel('left', u'\u03bb', units='m')
self.p1.setLabel('left', u'\u03bb', units='m')
self.setzeroButton.setText("-> nm")
self.moveButton.setText("Move to nm")
self.all_pos=[config_zeiss.Last_position_lambda[0]]
self.set_lambda_calib_data()
self.loadWaveCalibValues()
self.curve2.clear()
self.set_save()
else:
self.cb3.setCurrentIndex(1)
elif str(text)=="slit":
reply = QtGui.QMessageBox.question(self, 'Message', "Do you want to calibrate slit widths stepper positions?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.calib_mode="slit"
self.p0.setLabel('left', 'slit width', units='m')
self.p1.setLabel('left', 'slit width', units='m')
self.setzeroButton.setText("-> mm")
self.moveButton.setText("Move to mm")
self.all_pos=[config_zeiss.Last_position_slit[0]]
self.set_slit_calib_data()
self.loadSlitCalibValues()
self.curve2.clear()
self.set_save()
else:
self.cb3.setCurrentIndex(0)
def add_calib(self):
if str(self.filenameEdit.text()):
saveinfile=''.join([str(self.filenameEdit.text()),'_',self.timestr])
else:
saveinfile=''.join(["calib_",self.timestr])
if str(self.folderEdit.text()):
if not os.path.isdir(str(self.folderEdit.text())):
os.mkdir(str(self.folderEdit.text()))
saveinfolder=''.join([str(self.folderEdit.text()),"/"])
else:
saveinfolder=""
save_to_file=''.join([saveinfolder,saveinfile,".txt"])
if not os.path.exists(save_to_file):
print "Calib file created: ", save_to_file
with open(save_to_file, 'w') as thefile:
pass
with open(save_to_file, 'r') as thefile:
# read a list of lines into data
data = thefile.readlines()
if data and str(self.val_point) in data[-1]:
if self.calib_mode=="wavelength":
data[-1]=''.join([str(self.all_pos[-1]),'\t',str(self.val_point),'\n'])
elif self.calib_mode=="slit":
data[-1]=''.join([str(self.all_pos[-1]),'\t',str(self.val_point),'\n'])
with open(save_to_file, 'w') as thefile:
thefile.writelines(data)
else:
if self.calib_mode=="wavelength":
with open(save_to_file, 'a') as thefile:
thefile.write('%s' %self.all_pos[-1] )
thefile.write('\t%s\n' %self.val_point)
elif self.calib_mode=="slit":
with open(save_to_file, 'a') as thefile:
thefile.write('%s' %self.all_pos[-1] )
thefile.write('\t%s\n' %self.val_point)
# open calib file and plot
x=[]
y=[]
with open(save_to_file, 'r') as thefile:
for line in thefile:
columns = line.split()
x.extend([int(columns[0])])
if self.calib_mode=="wavelength":
y.extend([round(float(columns[1]),1)])
elif self.calib_mode=="slit":
y.extend([round(float(columns[1]),2)])
if self.calib_mode=="wavelength":
self.curve2.setData(x,numpy.array(y)/1.0e9)
elif self.calib_mode=="slit":
self.curve2.setData(x,numpy.array(y)/1.0e3)
#self.reorderButton.setEnabled(True)
def update_calib_button(self):
if self.calib_mode=="wavelength":
if len(self.y_local)-1>self.calibCounter:
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' nm' ]))
self.calibCounter+=1
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' nm']))
else:
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' nm' ]))
self.calibCounter=0
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' nm']))
elif self.calib_mode=="slit":
if len(self.y_local)-1>self.calibCounter:
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' mm' ]))
self.calibCounter+=1
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' mm']))
else:
self.val_point=self.y_local[self.calibCounter]
self.calibSaveButton.setText(''.join(['Save at ',str(self.val_point),' mm' ]))
self.calibCounter=0
self.calibButton.setText(''.join(['Adjust to ',str(self.y_local[self.calibCounter]),' mm']))
'''
def set_reorder(self):
if os.path.exists(self.filenameEdit.text()):
# open calib file and get all x and y
x=[]
y=[]
with open(self.save_to_file, 'r') as thefile:
for line in thefile:
columns = line.split()
x.extend([int(columns[0])])
y.extend([round(float(columns[1]),1)])
# sort x element and their respective y
sort_index = numpy.argsort(x)
sort_x=[]
sort_y=[]
with open(self.save_to_file, 'w') as thefile:
for i in sort_index:
thefile.write('%s\t' %x[i])
thefile.write('%s\n' %y[i])
sort_x.extend([ x[i] ])
sort_y.extend([ y[i] ])
self.curve2.setData(sort_x,sort_y)
#self.alignEdit.setText('')
else:
print "Warning! The calib file path does not exists!"
'''
	def set_lambda_calib_data(self):
		"""Load the reference wavelength calib file, plot it and restore the last position.

		Reads two columns (microstep position, wavelength in nm), plots raw
		points and a cubic spline on the reference plot, and updates the
		LCDs and config_zeiss with the wavelength corresponding to the last
		stored stepper position.
		"""
		try:
			x=[]
			y=[]
			with open(self.calibfile_lambda_str, 'r') as thefile:
				for line in thefile:
					columns = line.split()
					x.extend([int(columns[0])])
					y.extend([round(float(columns[1]),1)])
		except ValueError as e:
			QtGui.QMessageBox.warning(self, 'Message',"Something is wrong with the ref calib file! Do you have a ref Calib file with 2 columns, no headers, and all inputs are numbers?")
			return
		# physical range covered by this calibration (used by set_zero/move_to_val)
		self.min_y_calib=min(y)
		self.max_y_calib=max(y)
		if min(x)<self.Validrange_lambda[0] or max(x)>self.Validrange_lambda[1]:
			QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid wavelength stepper range is from ',str(self.Validrange_lambda[0]),' to ',str(self.Validrange_lambda[1]),' microsteps.' ]) )
			return
		# raw calibration points, plotted in meters (nm / 1e9)
		self.curve0_1.setData(x,numpy.array(y)/1.0e9)
		wv_fine=numpy.arange(y[0],y[-1]+0.1,0.1)
		#spline
		wv_curve=scipy.interpolate.splrep(y, x, k=3, s=0)
		#linear
		#wv_curve = scipy.interpolate.splrep(y, x, k=1, s=0)
		positions_fine = scipy.interpolate.splev(wv_fine, wv_curve, der=0)
		self.curve0_2.setData(positions_fine,numpy.array(wv_fine)/1.0e9)
		my_pos = self.all_pos[-1]
		if my_pos<min(x) or my_pos>max(x):
			QtGui.QMessageBox.warning(self, 'Message', "Current wavelength position is outside range of the calibration lambda file!")
			self.lcd1.display('-')
			self.lcd2.display('-')
		else:
			# Update the LCD display lcd2 with the wavelength which
			# corresponds to the saved Zaber microposition
			wv_curve2=scipy.interpolate.splrep(x, y, k=3, s=0)
			first_pos_val = round(scipy.interpolate.splev(my_pos, wv_curve2, der=0), 1)
			lcd_obj=type('lcd_obj',(object,),{'min_pos':my_pos, 'pos_val':first_pos_val})
			self.more_tals(lcd_obj)
			# persist to line 4 (0-indexed) of config_zeiss.py
			# NOTE(review): self.replace_line is not defined in Run_gui within
			# this chunk - presumably provided elsewhere in the file; verify.
			self.replace_line("config_zeiss.py",4,''.join(["Last_position_lambda=[",str(my_pos),",",str(first_pos_val),"]\n"]))
			imp.reload(config_zeiss)
	def set_slit_calib_data(self):
		"""Load the reference slit-width calib file, plot it and restore the last position.

		Mirror of set_lambda_calib_data for axis 2: two columns (microstep
		position, slit width in mm), cubic-spline plot, and LCD/config
		update from the last stored stepper position.
		"""
		try:
			x=[]
			y=[]
			with open(self.calibfile_slit_str, 'r') as thefile:
				for line in thefile:
					columns = line.split()
					x.extend([int(columns[0])])
					y.extend([round(float(columns[1]),2)])
		except ValueError as e:
			QtGui.QMessageBox.warning(self, 'Message',"Something is wrong with the ref calib file! Do you have a ref Calib file with 2 columns, no headers, and all inputs are numbers?")
			return
		# physical range covered by this calibration (used by set_zero/move_to_val)
		self.min_y_calib=min(y)
		self.max_y_calib=max(y)
		if min(x)<self.Validrange_slit[0] or max(x)>self.Validrange_slit[1]:
			QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid slit stepper range is from ',str(self.Validrange_slit[0]),' to ',str(self.Validrange_slit[1]),' microsteps.' ]) )
			return
		# raw calibration points, plotted in meters (mm / 1e3)
		self.curve0_1.setData(x,numpy.array(y)/1.0e3)
		slit_fine=numpy.arange(y[0],y[-1]+0.01,0.01)
		#spline
		slit_curve=scipy.interpolate.splrep(y, x, k=3, s=0)
		#linear
		#slit_curve = scipy.interpolate.splrep(y, x, k=1, s=0)
		positions_fine = scipy.interpolate.splev(slit_fine, slit_curve, der=0)
		self.curve0_2.setData(positions_fine,numpy.array(slit_fine)/1.0e3)
		my_pos = self.all_pos[-1]
		if my_pos<min(x) or my_pos>max(x):
			QtGui.QMessageBox.warning(self, 'Message', "Current slit position is outside range of the calibration slit file!")
			self.lcd1.display('-')
			self.lcd2.display('-')
		else:
			# Update the LCD display lcd2 with the wavelength which
			# corresponds to the saved Zaber microposition
			wv_curve2=scipy.interpolate.splrep(x, y, k=3, s=0)
			first_pos_val = round(scipy.interpolate.splev(my_pos, wv_curve2, der=0), 2)
			lcd_obj=type('lcd_obj',(object,),{'min_pos':my_pos, 'pos_val':first_pos_val})
			self.more_tals(lcd_obj)
			# persist to line 5 (0-indexed) of config_zeiss.py
			# NOTE(review): self.replace_line is not defined in Run_gui within
			# this chunk - presumably provided elsewhere in the file; verify.
			self.replace_line("config_zeiss.py",5,''.join(["Last_position_slit=[",str(my_pos),",",str(first_pos_val),"]\n"]))
			imp.reload(config_zeiss)
	def move_stop(self):
		"""Abort the move running in the current zaber_Thread worker."""
		self.get_zaber_Thread.abort_move()
	def set_zero(self):
		"""Redefine the current stepper position as the value typed in alignEdit.

		Validates the input (1 decimal nm / 2 decimals mm, within the loaded
		calib range), then spawns a zaber_Thread whose '-> nm'/'-> mm'
		branch sets the current position without moving.
		"""
		if self.calib_mode=="wavelength":
			try:
				move_num=round(float(self.alignEdit.text()),1)
			except ValueError as e:
				QtGui.QMessageBox.warning(self, 'Message', "Only real decimal numbers are accepted as a wavelength!")
				return
			if move_num<self.min_y_calib or move_num>self.max_y_calib:
				QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid wavelength range is from ',str(self.min_y_calib),' nm to ',str(self.max_y_calib),' nm.' ]) )
				return
			else:
				# stop the auto-disconnect timer while the worker runs
				self.timer.stop()
				sender = self.sender()
				self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str,move_num)
		elif self.calib_mode=="slit":
			try:
				move_num=round(float(self.alignEdit.text()),2)
			except ValueError as e:
				QtGui.QMessageBox.warning(self, 'Message', "Only real decimal numbers are accepted as a slit width!")
				return
			if move_num<self.min_y_calib or move_num>self.max_y_calib:
				QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid slit range is from ',str(self.min_y_calib),' mm to ',str(self.max_y_calib),' mm.' ]) )
				return
			else:
				# stop the auto-disconnect timer while the worker runs
				self.timer.stop()
				sender = self.sender()
				self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str,move_num)
		# old-style PyQt4 signal wiring to the worker thread
		self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
		self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
		self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
		self.get_zaber_Thread.start()
def move_to_val(self):
if self.calib_mode=="wavelength":
try:
move_num=round(float(self.moveEdit.text()),1)
except ValueError as e:
QtGui.QMessageBox.warning(self, 'Message', "Only real decimal numbers are accepted as a wavelength!")
return
if move_num<self.min_y_calib or move_num>self.max_y_calib:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid wavelength range is from ',str(self.min_y_calib),' nm to ',str(self.max_y_calib),' nm.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str,move_num)
elif self.calib_mode=="slit":
try:
move_num=round(float(self.moveEdit.text()),2)
except ValueError as e:
QtGui.QMessageBox.warning(self, 'Message', "Only real decimal numbers are accepted as a slit width!")
return
if move_num<self.min_y_calib or move_num>self.max_y_calib:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid slit width range is from ',str(self.min_y_calib),' mm to ',str(self.max_y_calib),' mm.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str,move_num)
self.allButtons_torf(False)
self.stopButton.setEnabled(True)
self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
self.get_zaber_Thread.start()
def move_to_val2(self):
if self.calib_mode=="wavelength":
move_num = round(self.y_local[self.calibCounter],1)
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str,move_num)
elif self.calib_mode=="slit":
move_num = round(self.y_local[self.calibCounter],2)
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str,move_num)
print move_num
self.allButtons_torf(False)
self.stopButton.setEnabled(True)
self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
self.get_zaber_Thread.start()
def move_rel(self):
move_num = int(self.moverelEdit.text())
if self.calib_mode=="wavelength":
move_tot = move_num+self.all_pos[-1]
if move_tot<self.Validrange_lambda[0] or move_tot>self.Validrange_lambda[1]:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid wavelength stepper range is from ',str(self.Validrange_lambda[0]),' to ',str(self.Validrange_lambda[1]),' microsteps.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str,move_num)
elif self.calib_mode=="slit":
move_tot = move_num+self.all_pos[-1]
if move_tot<self.Validrange_slit[0] or move_tot>self.Validrange_slit[1]:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid slit width stepper range is from ',str(self.Validrange_slit[0]),' to ',str(self.Validrange_slit[1]),' microsteps.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str,move_num)
self.allButtons_torf(False)
self.stopButton.setEnabled(True)
self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
self.get_zaber_Thread.start()
def move_abs(self):
move_num = int(self.moveabsEdit.text())
if self.calib_mode=="wavelength":
if move_num<self.Validrange_lambda[0] or move_num>self.Validrange_lambda[1]:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid wavelength stepper range is from ',str(self.Validrange_lambda[0]),' to ',str(self.Validrange_lambda[1]),' microsteps.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str,move_num)
elif self.calib_mode=="slit":
if move_num<self.Validrange_slit[0] or move_num>self.Validrange_slit[1]:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid slit stepper range is from ',str(self.Validrange_slit[0]),' to ',str(self.Validrange_slit[1]),' microsteps.' ]) )
return
else:
self.timer.stop()
sender = self.sender()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str,move_num)
self.allButtons_torf(False)
self.stopButton.setEnabled(True)
self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
self.get_zaber_Thread.start()
def move_jog(self):
sender = self.sender()
if sender.text()==u'\u25b2':
move_num=10
elif sender.text()==u'\u25bc':
move_num=-10
######################################
if self.calib_mode=="wavelength":
# update the lcd motorstep position
validrange_min, validrange_max=self.Validrange_lambda
move_tot = move_num+self.all_pos[-1]
if move_tot<validrange_min or move_tot>validrange_max:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid range is from ',str(validrange_min),' to ',str(validrange_max),' microsteps.' ]) )
return
else:
self.timer.stop()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_lambda_str)
if self.calib_mode=="slit":
# update the lcd motorstep position
validrange_min, validrange_max=self.Validrange_slit
move_tot = move_num+self.all_pos[-1]
if move_tot<validrange_min or move_tot>validrange_max:
QtGui.QMessageBox.warning(self, 'Message',''.join(['Valid range is from ',str(validrange_min),' to ',str(validrange_max),' microsteps.' ]) )
return
else:
self.timer.stop()
self.get_zaber_Thread=zaber_Thread(sender.text(),self.Za,self.calib_mode,self.calibfile_slit_str)
self.connect(self.get_zaber_Thread,SIGNAL("more_tals(PyQt_PyObject)"),self.more_tals)
self.connect(self.get_zaber_Thread,SIGNAL('bad_zaber_val(PyQt_PyObject)'), self.bad_zaber_val)
self.connect(self.get_zaber_Thread,SIGNAL('finished()'), self.set_finished)
self.get_zaber_Thread.start()
def more_tals(self,more_tals_obj):
self.all_pos.extend([ int(more_tals_obj.min_pos) ])
self.lcd1.display(str(more_tals_obj.min_pos))
self.lcd2.display(str(more_tals_obj.pos_val))
def bad_zaber_val(self,pyqt_object):
self.bad_zaber_vals=pyqt_object
def set_finished(self):
if self.bad_zaber_vals==serial.SerialException:
QtGui.QMessageBox.warning(self, 'Message',"Zaber serial severed. Closing the program..." )
sys.exit()
self.stopButton.setEnabled(False)
if self.bad_zaber_vals==ValueError:
QtGui.QMessageBox.warning(self, 'Message',"Zaber getting bad values. Closing the serial..." )
self.bad_zaber_vals=False
self.set_disconnect()
return
self.allButtons_torf(True)
self.timer.start(1000*60*10)
def closeEvent(self, event):
if hasattr(self, 'Za'):
reply = QtGui.QMessageBox.question(self, 'Message', "Quit calibration now? The microstep position will be saved and the stepper hold current will be set to zero!", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
else:
reply = QtGui.QMessageBox.question(self, 'Message', "Quit calibration now?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if reply==QtGui.QMessageBox.Yes:
if hasattr(self, 'Za'):
if not hasattr(self, 'get_zaber_Thread'):
if self.Za.is_open():
self.Za.set_Hold_Current(1,1,0)
self.Za.set_Hold_Current(1,2,0)
self.Za.close()
else:
if self.get_zaber_Thread.isRunning():
QtGui.QMessageBox.warning(self, 'Message', "Calibration in progress. Cancel the move then quit!")
event.ignore()
return
else:
if self.Za.is_open():
self.Za.set_Hold_Current(1,1,0)
self.Za.set_Hold_Current(1,2,0)
self.Za.close()
if hasattr(self, 'timer'):
if self.timer.isActive():
self.timer.stop()
event.accept()
else:
event.ignore()
def set_save_plots(self):
if str(self.folderEdit.text()):
if not os.path.isdir(str(self.folderEdit.text())):
os.mkdir(str(self.folderEdit.text()))
saveinfolder=''.join([str(self.folderEdit.text()),"/"])
else:
saveinfolder=""
save_pic_to_file=''.join([saveinfolder,self.timestr,'.png'])
# create an exporter instance, as an argument give it
# the item you wish to export
exporter = pg.exporters.ImageExporter(self.p0.scene())
# set export parameters if needed
#exporter.parameters()['width'] = 100 # (note this also affects height parameter)
# save to file
exporter.export(save_pic_to_file)
def set_save(self):
self.timestr=time.strftime("%y%m%d-%H%M")
self.replace_line("config_zeiss.py",4, ''.join(["Last_position_lambda=",str(config_zeiss.Last_position_lambda),"\n"]) )
self.replace_line("config_zeiss.py",5, ''.join(["Last_position_slit=",str(config_zeiss.Last_position_slit),"\n"]) )
self.replace_line("config_zeiss.py",7, ''.join(["calibfile_lambda=\"",self.calibfile_lambda_str,"\"\n"]) )
self.replace_line("config_zeiss.py",8, ''.join(["calibfile_slit=\"",self.calibfile_slit_str,"\"\n"]) )
self.replace_line("config_zeiss.py",9, ''.join(["lambdafile=\"",self.lambda_str,"\"\n"]) )
self.replace_line("config_zeiss.py",10, ''.join(["slitfile=\"",self.slit_str,"\"\n"]) )
self.replace_line("config_zeiss.py",11, ''.join(["calib_lambda_filename=\"",str(self.filenameEdit.text()),"\"\n"]) )
self.replace_line("config_zeiss.py",12, ''.join(["calib_lambda_foldername=\"",str(self.folderEdit.text()),"\"\n"]) )
self.replace_line("config_zeiss.py",13, ''.join(["calib_slit_filename=\"",str(self.filenameEdit.text()),"\"\n"]) )
self.replace_line("config_zeiss.py",14, ''.join(["calib_slit_foldername=\"",str(self.folderEdit.text()),"\"\n"]) )
self.replace_line("config_zeiss.py",17, ''.join(["timestr=\"",self.timestr,"\"\n"]) )
self.replace_line("config_zeiss.py",18, ''.join(["zaberport=\"",self.zaberport_str,"\"\n"]) )
self.replace_line("config_zeiss.py",19, ''.join(["sr510port=\"",self.sr510port_str,"\"\n"]) )
self.lcd3.display(self.timestr)
imp.reload(config_zeiss)
def replace_line(self,file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
def main():
app=QtGui.QApplication(sys.argv)
ex=Run_gui()
#sys.exit(app.exec_())
# avoid message 'Segmentation fault (core dumped)' with app.deleteLater()
app.exec_()
app.deleteLater()
sys.exit()
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
eb5b33dc0fc012d521bf71c982068b71534887b6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/sql/get_database_vulnerability_assessment.py | a6aa84ede40a8f73706d9941a81575ccf9b1a66e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,722 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetDatabaseVulnerabilityAssessmentResult',
'AwaitableGetDatabaseVulnerabilityAssessmentResult',
'get_database_vulnerability_assessment',
]
@pulumi.output_type
class GetDatabaseVulnerabilityAssessmentResult:
"""
A database vulnerability assessment.
"""
def __init__(__self__, id=None, name=None, recurring_scans=None, storage_account_access_key=None, storage_container_path=None, storage_container_sas_key=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if recurring_scans and not isinstance(recurring_scans, dict):
raise TypeError("Expected argument 'recurring_scans' to be a dict")
pulumi.set(__self__, "recurring_scans", recurring_scans)
if storage_account_access_key and not isinstance(storage_account_access_key, str):
raise TypeError("Expected argument 'storage_account_access_key' to be a str")
pulumi.set(__self__, "storage_account_access_key", storage_account_access_key)
if storage_container_path and not isinstance(storage_container_path, str):
raise TypeError("Expected argument 'storage_container_path' to be a str")
pulumi.set(__self__, "storage_container_path", storage_container_path)
if storage_container_sas_key and not isinstance(storage_container_sas_key, str):
raise TypeError("Expected argument 'storage_container_sas_key' to be a str")
pulumi.set(__self__, "storage_container_sas_key", storage_container_sas_key)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="recurringScans")
def recurring_scans(self) -> Optional['outputs.VulnerabilityAssessmentRecurringScansPropertiesResponse']:
"""
The recurring scans settings
"""
return pulumi.get(self, "recurring_scans")
@property
@pulumi.getter(name="storageAccountAccessKey")
def storage_account_access_key(self) -> Optional[str]:
"""
Specifies the identifier key of the storage account for vulnerability assessment scan results. If 'StorageContainerSasKey' isn't specified, storageAccountAccessKey is required.
"""
return pulumi.get(self, "storage_account_access_key")
@property
@pulumi.getter(name="storageContainerPath")
def storage_container_path(self) -> Optional[str]:
"""
A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/). It is required if server level vulnerability assessment policy doesn't set
"""
return pulumi.get(self, "storage_container_path")
@property
@pulumi.getter(name="storageContainerSasKey")
def storage_container_sas_key(self) -> Optional[str]:
"""
A shared access signature (SAS Key) that has write access to the blob container specified in 'storageContainerPath' parameter. If 'storageAccountAccessKey' isn't specified, StorageContainerSasKey is required.
"""
return pulumi.get(self, "storage_container_sas_key")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetDatabaseVulnerabilityAssessmentResult(GetDatabaseVulnerabilityAssessmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabaseVulnerabilityAssessmentResult(
id=self.id,
name=self.name,
recurring_scans=self.recurring_scans,
storage_account_access_key=self.storage_account_access_key,
storage_container_path=self.storage_container_path,
storage_container_sas_key=self.storage_container_sas_key,
type=self.type)
def get_database_vulnerability_assessment(database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
vulnerability_assessment_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseVulnerabilityAssessmentResult:
"""
A database vulnerability assessment.
API Version: 2020-08-01-preview.
:param str database_name: The name of the database for which the vulnerability assessment is defined.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
:param str vulnerability_assessment_name: The name of the vulnerability assessment.
"""
__args__ = dict()
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
__args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:sql:getDatabaseVulnerabilityAssessment', __args__, opts=opts, typ=GetDatabaseVulnerabilityAssessmentResult).value
return AwaitableGetDatabaseVulnerabilityAssessmentResult(
id=__ret__.id,
name=__ret__.name,
recurring_scans=__ret__.recurring_scans,
storage_account_access_key=__ret__.storage_account_access_key,
storage_container_path=__ret__.storage_container_path,
storage_container_sas_key=__ret__.storage_container_sas_key,
type=__ret__.type)
| [
"[email protected]"
] | |
79db5f1c36777c88d7fa3bc39575c57b377af1e3 | 81d2815060bdf51e59f40366df72954ad28b2398 | /4th_hw/fourth_homework/settings.py | fd3cb961f456cae77e2fd2c6099a1d6763910875 | [] | no_license | ningpop/LikeLion_7th_HW | 6016604427e335250f2e3daeec27f17731612b47 | b2c65a0b7a9a928a45cf07b67cd9ed18fb86d799 | refs/heads/master | 2020-06-30T18:08:54.024617 | 2019-12-30T16:17:03 | 2019-12-30T16:17:03 | 200,902,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,613 | py | """
Django settings for fourth_homework project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool( os.environ.get('DJANGO_DEBUG', False))
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'learning.apps.LearningConfig',
'accounts.apps.AccountsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'fourth_homework.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['fourth_homework/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fourth_homework.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'learning', 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env) | [
"[email protected]"
] | |
048c333f5f321f508763e1bc3d96c4ec5a465231 | 3bddb2814881bb5e4679de3d31ac0bde57b86148 | /trax/data/tokenizer.py | 64081f4da0735026efb1c20851a2a900e708ad02 | [
"Apache-2.0"
] | permissive | google/trax | 7a2b1a83eb8848136a5f5e07988efcef2f0b704f | 1bb3b89427f669f2f0ec84633952e21b68964a23 | refs/heads/master | 2023-08-30T22:36:09.651644 | 2023-03-29T01:14:20 | 2023-03-29T01:15:47 | 213,020,264 | 8,180 | 917 | Apache-2.0 | 2023-08-29T14:30:03 | 2019-10-05T15:09:14 | Python | UTF-8 | Python | false | false | 5,810 | py | # coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
import collections
import sys
import unicodedata
from absl import logging
import six
import tensorflow as tf
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in range(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(tf.io.gfile.glob(filepattern))
lines_read = 0
for filename in filenames:
with tf.io.gfile.GFile(filename) as f:
if split_on_newlines:
for line in f:
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
else:
if max_lines:
doc = []
for line in f:
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Returns:
a dictionary mapping token to count.
"""
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines):
counts.update(encode(doc))
return counts
def vocab_token_counts(text_filepattern, max_lines):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logging.warning("Malformed vocab line #%d '%s'", i, line)
continue
token, count = line.rsplit(",", 1)
ret[token] = int(count)
return ret
| [
"[email protected]"
] | |
b64ec8ccaf0a47dd9f85266b92faf3122e5e57ff | 6896fce8ee082f9730c056436e49ef0d16a6ea03 | /exception/exceptions.py | cbec08fae3c703e147a7daef31cd584579c057d3 | [] | no_license | Sugeei/python-practice | 5022ae7c34bc04972edebc15936248cb9869ec54 | 048df40500a059e4380f3ecc2581de96c9a1fc9b | refs/heads/master | 2022-12-07T06:34:40.740379 | 2022-11-13T11:48:29 | 2022-11-13T11:48:29 | 121,074,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from bs4 import BeautifulSoup
ERROR_MAP = {
"200000": "invalid input parameter",
"500000": "load data error",
"600000": "dump data error",
"700000": "data verify error",
"800000": "algorithm error"
}
class UranusError(Exception):
def __init__(self, error_code=None, message=''):
Exception.__init__(self,
'%s%s' % (ERROR_MAP[error_code] if ERROR_MAP.get(
error_code) is not None else '', message))
self.error_code = error_code
# assertion
# https://realpython.com/python-exceptions/
def divide(a, b):
try:
r = a / b
except:
raise ValueError
else: # no exceptions , run this code
print('divide result is %s' % r)
finally: # always run this code
print("done")
# divide(4,0)
print('--------')
divide(4,1) | [
"[email protected]"
] | |
b6ecbef1faf3aab95571a56f1eaf1dece622f4c0 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_org_apache_sling_distribution_monitor_distribution_queue_health_check_properties.py | 78c34d346fd02e2be860bd78e70e6726077ba3fc | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,475 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.org_apache_sling_distribution_monitor_distribution_queue_health_check_properties import OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestOrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties(unittest.TestCase):
"""OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties(self):
"""Test OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.org_apache_sling_distribution_monitor_distribution_queue_health_check_properties.OrgApacheSlingDistributionMonitorDistributionQueueHealthCheckProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
57145b59e685e5f01020f461aa0f8d6d30d4aaa9 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2414.py | 3053991009fd4d98c0485cb6eb9c680b4184c3a6 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,337 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
    """Return the number of characters in s."""
    n: int = 0
    n = len(s)
    return n
def foo2(s: str, s2: str) -> int:
    """Return the length of s; s2 is accepted but never read."""
    n: int = 0
    n = len(s)
    return n
def foo3(s: str, s2: str, s3: str) -> int:
    """Return the length of s; s2 and s3 are accepted but never read."""
    n: int = 0
    n = len(s)
    return n
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
    """Return the length of s; the remaining parameters are never read."""
    n: int = 0
    n = len(s)
    return n
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
    """Return the length of s; the remaining parameters are never read."""
    n: int = 0
    n = len(s)
    return n
class bar(object):
    # Flag rewritten by baz; starts out True.
    p: bool = True

    def baz(self:"bar", xx: [int]) -> str:
        """Scan xx, recording in self.p whether the element seen last was 2.

        Bumps the global count once per element.  NOTE: the trailing while
        loop can only exit by raising IndexError at xx[1] (empty or
        one-element list with self.p True) or by taking the self.p branch
        with idx at 0; once self.p is False with idx <= 0 it spins forever,
        because foo("Long"[0]) is always 1 and `self is None` is always
        False, so neither idx nor self.p can change again.
        """
        global count
        idx: int = 0
        one: int = 1

        def clamp(bound: int) -> object:
            # Drop the enclosing idx down to -1 whenever it exceeds bound.
            nonlocal idx
            if idx > bound:
                idx = -1

        for idx in xx:
            self.p = idx == 2
            clamp(0)  # Yay! ChocoPy
            count = count + 1
        while idx <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                idx = idx + 1
            else:
                if foo("Long"[0]) == 1:
                    self.p = self is None
        return "Nope"
class bar2(object):
    """Coverage shape: like bar but with a second flag and a 2-arg method."""
    p: bool = True
    # p2 is declared for coverage; no method in this chunk reads or writes it.
    p2: bool = True
    def baz(self:"bar2", xx: [int]) -> str:
        """Scan xx, tracking in self.p whether the last element was 2.

        Increments the global count per element.  The trailing while loop
        cannot advance x in its elif branch (foo("Long"[0]) is always 1 and
        `self is None` is always False), so once self.p is False with
        x <= 0 it never terminates; with an empty/short xx it raises
        IndexError at xx[1] instead.
        """
        global count
        x:int = 0
        y:int = 1  # never read
        def qux(y: int) -> object:
            # Reset the enclosing x to -1 once it exceeds y.
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
        """Same scan as baz, over xx only; xx2 is accepted but never read.

        x2, y, y2 and the nested qux2 exist only for coverage: qux2 is
        defined but never called.
        """
        global count
        x:int = 0
        x2:int = 0  # only referenced by the uncalled qux2
        y:int = 1  # never read
        y2:int = 1  # never read
        def qux(y: int) -> object:
            # Reset the enclosing x to -1 once it exceeds y.
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            # Defined but never invoked in this chunk.
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
class bar3(object):
    """Coverage shape: like bar2 but with a third flag and a 3-arg method."""
    p: bool = True
    # p2/p3 are declared for coverage; no method in this chunk touches them.
    p2: bool = True
    p3: bool = True
    def baz(self:"bar3", xx: [int]) -> str:
        """Scan xx, tracking in self.p whether the last element was 2.

        Increments the global count per element.  The trailing while loop
        cannot advance x in its elif branch (foo("Long"[0]) is always 1 and
        `self is None` is always False), so once self.p is False with
        x <= 0 it never terminates; an empty/short xx raises IndexError at
        xx[1] instead.
        """
        global count
        x:int = 0
        y:int = 1  # never read
        def qux(y: int) -> object:
            # Reset the enclosing x to -1 once it exceeds y.
            nonlocal x
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
        """Same scan as baz, over xx only; xx2 is accepted but never read.

        x2, y, y2 and the nested qux2 exist only for coverage: qux2 is
        defined but never called.
        """
        global count
        x:int = 0
        x2:int = 0  # only referenced by the uncalled qux2
        y:int = 1  # never read
        y2:int = 1  # never read
        def qux(y: int) -> object:
            # Reset the enclosing x to -1 once it exceeds y.
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            # Defined but never invoked in this chunk.
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
    def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
        """Same scan as baz, over xx only; xx2 and xx3 are never read.

        x2/x3, y/y2/y3 and the nested qux2/qux3 exist only for coverage:
        qux2 and qux3 are defined but never called.
        """
        global count
        x:int = 0
        x2:int = 0  # only referenced by the uncalled qux2/qux3
        x3:int = 0  # only referenced by the uncalled qux3
        y:int = 1  # never read
        y2:int = 1  # never read
        y3:int = 1  # never read
        def qux(y: int) -> object:
            # Reset the enclosing x to -1 once it exceeds y.
            nonlocal x
            if x > y:
                x = -1
        def qux2(y: int, y2: int) -> object:
            # Defined but never invoked in this chunk.
            nonlocal x
            nonlocal x2
            if x > y:
                x = -1
        def qux3(y: int, y2: int, y3: int) -> object:
            # Defined but never invoked in this chunk.
            nonlocal x
            nonlocal x2
            nonlocal x3
            if x > y:
                x = -1
        for x in xx:
            self.p = x == 2
            qux(0) # Yay! ChocoPy
            count = count + 1
        while x <= 0:
            if self.p:
                xx[0] = xx[1]
                self.p = not self.p
                x = x + 1
            elif foo("Long"[0]) == 1:
                self.p = self is None
        return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
$Statement
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
7b4ea0f4ff1d23cb5acb9a1696155e58d41a06ed | deb3c16ef887b6c496b8c920809d79b9f73aa2fe | /libs/telewizjaonline.py | 3ff44fd184c1e68bbc06dccfa2babf9394c94358 | [] | no_license | Yaser7440/cmdline_iptvplayer | 1ea35f4fd36c708176a43d402a49342c4cf723a5 | 4e287021d86cab8d6525262b647d144c6141d6b1 | refs/heads/master | 2021-01-24T10:49:29.278730 | 2016-09-21T09:24:26 | 2016-09-21T09:24:26 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,018 | py | # -*- coding: utf-8 -*-
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvplayerinit import TranslateTXT as _
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvtools import printDBG, printExc, GetCookieDir
from Plugins.Extensions.IPTVPlayer.tools.iptvtypes import strwithmeta
from Plugins.Extensions.IPTVPlayer.libs.pCommon import common
from Plugins.Extensions.IPTVPlayer.libs.urlparser import urlparser
###################################################
###################################################
# FOREIGN import
###################################################
from Components.config import config, ConfigSelection, ConfigYesNo, ConfigText, getConfigListEntry
import re
try: import simplejson as json
except: import json
from os import path as os_path
############################################
###################################################
# E2 GUI COMMPONENTS
###################################################
from Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper
###################################################
###################################################
# Config options for HOST
###################################################
# Register the persisted "sort channels by" option for this host; its value is
# read by TelewizjaOnline.getChannelsList (sent as the 'orderby' query param).
config.plugins.iptvplayer.telewizjaonline_sort = ConfigSelection(default = "date", choices = [("date", "Date"), ("ostatnio-ogladane", "ostatnio oglądane"), ("title", "Title"), ("view", "Views"), ("like", "Likes"), ("comment", "Comments")])
def GetConfigList():
    """Build the host's entry list for the IPTV Player settings screen."""
    return [
        getConfigListEntry("Sortuj kanały według:",
                           config.plugins.iptvplayer.telewizjaonline_sort),
    ]
###################################################
class TelewizjaOnline:
    """Scraper for telewizja-online.pl: lists TV categories and channels and
    resolves a channel page to a playable stream link.
    """
    MAINURL = 'http://telewizja-online.pl/'
    HTTP_HEADER = { 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0', 'Referer': MAINURL }
    def __init__(self):
        # HTTP/HTML helper and stream-link auto-detector from the host tools.
        self.cm = common()
        self.up = urlparser()
    def getCategoriesList(self):
        """Scrape the station-category menu; returns [{'url', 'title'}, ...]."""
        printDBG("TelewizjaOnline.getCategoriesList")
        catsList = []
        sts,data = self.cm.getPage(TelewizjaOnline.MAINURL)
        if not sts: return catsList
        # Collect anchors between the 'Kategorie Stacji TV' heading and the
        # end of its list.
        data = self.cm.ph.getDataBeetwenMarkers(data, 'Kategorie Stacji TV', '</ul>', False)[1]
        data = re.compile('<a[^>]+?href="([^"]+?)"[^>]*?>([^<]+?)<').findall(data)
        for item in data:
            catsList.append({'url':item[0], 'title':item[1]})
        return catsList
    def getChannelsList(self, baseUrl):
        """Scrape one category page; returns [{'title', 'url', 'icon'}, ...].

        The configured sort order is appended as the ``orderby`` query
        parameter.
        """
        printDBG("TelewizjaOnline.getChannelsList baseUrl[%s]" % baseUrl )
        channelsList = []
        url = baseUrl + '?orderby=' + config.plugins.iptvplayer.telewizjaonline_sort.value
        sts,data = self.cm.getPage(url)
        if not sts: return channelsList
        # Channel tiles are 'col-md-3' columns; split the grid into tiles and
        # pull title/link/icon out of each one.
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="col-md-3', '<center>', False)[1]
        data = data.split('<div class="col-md-3')
        for item in data:
            title = self.cm.ph.getSearchGroups(item, 'title="([^"]+?)"')[0]
            url = self.cm.ph.getSearchGroups(item, 'href="([^"]+?)"')[0]
            icon = self.cm.ph.getSearchGroups(item, 'src="(http[^"]+?)"')[0]
            channelsList.append({'title':title, 'url':url, 'icon':icon})
        return channelsList
    def getVideoLink(self, baseUrl):
        """Resolve a channel page to playable stream links via the
        auto-detecting urlparser."""
        printDBG("TelewizjaOnline.getVideoLink url[%s]" % baseUrl)
        def _url_path_join(a, b):
            # NOTE(review): this helper appears unused within the method.
            from urlparse import urljoin
            return urljoin(a, b)
        sts,data = self.cm.getPage(baseUrl)
        if not sts: return []
        data = self.cm.ph.getDataBeetwenMarkers(data, '<div id="player-embed">', '<div class="player-button">', False)[1]
        url = self.cm.ph.getSearchGroups(data, '<iframe[^>]+?src="([^"]+?)"')[0]
        if '' != url:
            # An embedded iframe was found: drop the page HTML and pass only
            # the URL (presumably so detection fetches the iframe itself —
            # confirm against urlparser.getAutoDetectedStreamLink).
            data = None
        return self.up.getAutoDetectedStreamLink(url, data)
| [
"[email protected]"
] | |
85276507b54d3f216c070a9c8873c8ff120d8120 | 72a8181e5502128fec62b132fbe19cd9d50dab4c | /rules/plot.smk | 28df3be0b7d220ab200f373be5f95348d4b02f2c | [] | no_license | EthanHolleman/DRIP-AGS-ENCODE | 1fd3b7065ec7f47e783674df14955a7b655edc08 | e3bb63b6d1cae82ddc6fe8857a1e66e2f41b2781 | refs/heads/main | 2023-04-03T21:07:34.651467 | 2021-04-23T02:42:18 | 2021-04-23T02:42:18 | 360,375,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | smk |
# Render the concatenated all-sample intersection BED into a single summary
# PNG via the R plotting script (runs inside the project's R conda env).
rule make_feature_intersection_plot:
    conda:
        '../envs/R.yml'
    input:
        'output/intersect/all_samples_concat.intersection.bed'
    output:
        'output/plots/feature_intersection_plot.png'
    shell:'''
    mkdir -p output/plots
    Rscript scripts/plot_encode_intersections.R {input} {output}
''' | [
"[email protected]"
] | |
0b2bc07bfe47ebc246deec181f61d7fa55a65b8f | e8d5471bd4a47794d66162060343f740e0febca4 | /server/src/uds/auths/RegexLdap/__init__.py | 4065f8086cf40c30d7e64bfeaa397d4232fd9e6e | [] | no_license | git38438/openuds | ef939c2196d6877e00e92416609335d57dd1bd55 | 7d66d92f85f01ad1ffd549304672dd31008ecc12 | refs/heads/master | 2020-06-22T14:07:33.227703 | 2019-07-18T11:03:56 | 2019-07-18T11:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
from .Authenticator import RegexLdap
| [
"[email protected]"
] | |
1af643695b4192619ffcd424991f063d051f610c | 6cac02f4df495f1acec3fde64335aa4881230cba | /tutorials/foo-tutorial/foo/foo.py | c828d610c07b3232e3f034ebfbced761d19fd565 | [] | no_license | ivannz/pkg_deploy_repo_renamed | 96610728c097f0bb77a047b09681bb1d5fe6ffc3 | 9ce24ffcc5db6235dd3946f8a63123c3955ea957 | refs/heads/master | 2020-07-16T17:28:59.668633 | 2019-09-03T07:08:29 | 2019-09-03T07:08:29 | 205,832,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | def this():
from this import s
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i + c)] = chr((i + 13) % 26 + c)
return "".join(d.get(c, c) for c in s)
| [
"[email protected]"
] | |
2a21ac1ec7913bc31720e0eb686e858987acfe58 | 75117becf9f75122e60cd806599ae24c16065689 | /python_models8/neuron/builds/IF_curr_exp_i.py | 307db7fb5aee6fefae5f3e8176e659b1466f3901 | [] | no_license | chanokin/sPyNNaker8NewModelTemplate | d911443fa650a4016828341fd252ddb2d7bad313 | 2d64f34ed5a8f5312a3176792bee57339785c5ea | refs/heads/master | 2020-11-27T01:10:50.593741 | 2020-01-07T15:56:54 | 2020-01-07T15:56:54 | 229,252,692 | 0 | 0 | null | 2019-12-20T11:28:48 | 2019-12-20T11:28:48 | null | UTF-8 | Python | false | false | 2,945 | py | # A PyNN Model for standard neurons built from components
from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard
# Components from main tools
from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent
from spynnaker.pyNN.models.neuron.synapse_types import SynapseTypeExponential
from spynnaker.pyNN.models.defaults import default_initial_values
from spynnaker.pyNN.models.neuron.neuron_models.neuron_model_leaky_integrate_and_fire import NeuronModelLeakyIntegrateAndFire
from python_models8.neuron.threshold_types.AdaptiveThreshold import AdaptiveThreshold
class IF_curr_exp_i(AbstractPyNNNeuronModelStandard):
    """Current-based leaky integrate-and-fire neuron with exponential
    synapses and an adaptive spike threshold (``AdaptiveThreshold``).
    """

    @default_initial_values({"v_init", "isyn_exc", "isyn_inh"})
    def __init__(self,
                 # neuron model parameters and state variables
                 i_offset=0.0,
                 v_init=-70.0,
                 v_rest=-70.0,
                 v_reset=-100.0,
                 tau_m=10.0,
                 cm=2.0,
                 tau_refrac=3.0,
                 # threshold type parameters
                 v_threshold=-10.0,
                 tau_threshold=120,
                 w_threshold=1.8,
                 # synapse type parameters
                 tau_syn_E=5.0,
                 tau_syn_I=5.0,
                 isyn_exc=0.0,
                 isyn_inh=0.0
                 ):
        # A membrane potential of None means "start at rest".
        if v_init is None:
            v_init = v_rest
        # Keep copies of the user-facing settings so the properties below can
        # expose them without reaching into the component objects.
        self.__v_init = v_init
        self.__v_threshold = v_threshold
        self.__tau_threshold = tau_threshold
        self.__w_threshold = w_threshold
        # Assemble the standard-model components inline and hand them to the
        # base class together with the board executable to load.
        super(IF_curr_exp_i, self).__init__(
            model_name="IF_curr_exp_i",
            binary="IF_curr_exp_i.aplx",
            neuron_model=NeuronModelLeakyIntegrateAndFire(
                v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac),
            input_type=InputTypeCurrent(),
            synapse_type=SynapseTypeExponential(
                tau_syn_E, tau_syn_I, isyn_exc, isyn_inh),
            threshold_type=AdaptiveThreshold(
                v_threshold, w_threshold, tau_threshold, v_rest))

    @property
    def v_init(self):
        """Configured initial membrane potential."""
        return self.__v_init

    @v_init.setter
    def v_init(self, value):
        self.__v_init = value

    @property
    def v_threshold(self):
        """Configured ``v_threshold`` of the adaptive threshold."""
        return self.__v_threshold

    @v_threshold.setter
    def v_threshold(self, value):
        self.__v_threshold = value

    @property
    def tau_threshold(self):
        """Configured ``tau_threshold`` of the adaptive threshold."""
        return self.__tau_threshold

    @tau_threshold.setter
    def tau_threshold(self, value):
        self.__tau_threshold = value

    @property
    def w_threshold(self):
        """Configured ``w_threshold`` of the adaptive threshold."""
        return self.__w_threshold

    @w_threshold.setter
    def w_threshold(self, value):
        self.__w_threshold = value
| [
"[email protected]"
] | |
5f69045f7aa47cdf638b20fe0213be9eee7ea9cf | 37c38b97d0a4b8098ec3c35b7122afb1fbb9eac9 | /newke/py36/py36/class_biao.py | ffce719d491f697100ed5daab2206f4b953fd2aa | [] | no_license | lionheartStark/sword_towards_offer | 8c2f9015a427317375d53eee982d630ffd4fa9c0 | cb3587242195bb3f2626231af2da13b90945a4d5 | refs/heads/master | 2022-12-02T20:50:18.789828 | 2020-08-23T02:00:48 | 2020-08-23T02:00:48 | 266,257,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from typing import List
from collections import defaultdict,deque
class Solution:
    """Course-schedule topological ordering (Kahn's algorithm)."""

    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Return a valid order to take all ``numCourses`` courses, or ``[]``
        if the prerequisite graph contains a cycle.

        Each ``[course, prereq]`` pair means ``prereq`` must be taken before
        ``course``.  (Note: the original ``-> bool`` annotation was wrong —
        this method has always returned a list.)  Runs in O(V + E) instead of
        rescanning every course's remaining prerequisite set on each dequeue.
        """
        # Deduplicate pairs so a repeated edge is only counted once.
        prereqs_of = defaultdict(set)
        for course, prereq in prerequisites:
            prereqs_of[course].add(prereq)
        # indegree[c] = number of distinct unmet prerequisites of c;
        # unlocks[p] = courses that get one step closer once p is taken.
        indegree = [0] * numCourses
        unlocks = defaultdict(list)
        for course, prereq_set in prereqs_of.items():
            indegree[course] = len(prereq_set)
            for prereq in prereq_set:
                unlocks[prereq].append(course)
        # Start with every course that has no prerequisites at all.
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        order = []
        while queue:
            taken = queue.popleft()
            order.append(taken)
            for course in unlocks[taken]:
                indegree[course] -= 1
                if indegree[course] == 0:
                    queue.append(course)
        # Every course was scheduled iff the graph is acyclic.
        return order if len(order) == numCourses else []
| [
"[email protected]"
] | |
5a520bbe602829e4a1a651efc846844f07970208 | bcfa02c21a73798872bbb28303233d1f0039cf00 | /server/www/teleport/webroot/app/controller/dashboard.py | 65a5f2eea63c35642406ac5a3c52e530667224cb | [
"Apache-2.0"
] | permissive | zhoulhb/teleport | 6301cd50c951bcbac21cbe24017eb8421ff57adc | 54da194697898ef77537cfe7032d774555dc1335 | refs/heads/master | 2021-11-10T17:10:59.661130 | 2021-11-09T11:16:19 | 2021-11-09T11:16:19 | 192,643,069 | 0 | 0 | Apache-2.0 | 2019-06-19T02:20:53 | 2019-06-19T02:20:52 | null | UTF-8 | Python | false | false | 361 | py | # -*- coding: utf-8 -*-
from app.const import *
from app.base.controller import TPBaseHandler, TPBaseJsonHandler
from app.model import stats
class IndexHandler(TPBaseHandler):
    """Serves the web console dashboard landing page."""
    def get(self):
        # Require the web-login privilege; on failure just stop here
        # (presumably check_privilege() has already produced the error
        # response/redirect — confirm in TPBaseHandler).
        ret = self.check_privilege(TP_PRIVILEGE_LOGIN_WEB)
        if ret != TPE_OK:
            return
        self.render('dashboard/index.mako')
| [
"[email protected]"
] | |
0f2ac223d96f5a6d71a7a54cad6006c3bc48733c | b6f8b2f023004fc0ea185b5e1ef2cbccce9ef513 | /misc/figures_thesis.py | 05bcf26cc2a72e5051b3bd7f7406d3d6a1d50359 | [
"BSD-3-Clause"
] | permissive | tenglongcong/petibm-examples | a73a6cdba864269fe9402d0a8b44582f2bcbcd9f | 3817d50b0b26df5901701c0cfe82a2d57c964e89 | refs/heads/master | 2020-11-27T17:12:28.335357 | 2019-12-04T23:51:54 | 2019-12-04T23:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | """Gather figures (to be included in thesis)."""
import os
import pathlib
import shutil
# Repository root: this file lives in <root>/misc/, so take parents[1]
# of the absolute file path.
rootdir = pathlib.Path(__file__).absolute().parents[1]
n_parts = len(rootdir.parts)
# Create the output directory.
figdir = rootdir / 'figures_thesis'
figdir.mkdir(parents=True, exist_ok=True)
# Load paths of figures to gather.
# Each non-'#' line of the manifest is a figure path relative to the root.
inpath = rootdir / 'misc' / 'figures_thesis.txt'
filepaths = []
with open(inpath, 'r') as infile:
    filepaths = [rootdir / line.strip() for line in infile.readlines()
                 if not line.startswith('#')]
# Define new names of the output figures.
# Prefix each name with its directory components (skipping the first two
# below the repo root and any literal 'figures' directory) so figures from
# different case directories cannot collide in the flat output folder.
filenames = []
for filepath in filepaths:
    filename, filedir = filepath.name, filepath.parent
    prefix = '_'.join([e for e in filedir.parts[n_parts + 2:]
                       if e != 'figures'])
    filenames.append('_'.join([prefix, filename]).lstrip('_'))
# Copy figures to output directory.
for filepath, filename in zip(filepaths, filenames):
    shutil.copy(filepath, figdir / filename)
| [
"[email protected]"
] | |
862e1582b1eea05a10d17fec0afe45b0ba83391c | 17e08f795273d6f4233ab440c2706130f6520b58 | /fannypack/utils/_deprecation.py | 9174fd10a6bbb73f059b87105a1183e6c2716f63 | [
"MIT"
] | permissive | HaoWen470/fannypack | db5e6bb670004e470254e1e632899aeec38ee041 | 7e2c949de0e0cac69a95a5a777f8a4b1fa0fc17a | refs/heads/master | 2023-01-03T20:35:35.248848 | 2020-10-31T09:01:01 | 2020-10-31T09:01:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | import warnings
from typing import Callable
def deprecation_wrapper(message: str, function_or_class: Callable) -> Callable:
    """Wrap a deprecated function or class.

    The first call to the returned wrapper emits *message* as a
    ``DeprecationWarning``; subsequent calls are silent.  The wrapper
    otherwise delegates straight to *function_or_class*.

    Args:
        message (str): Warning message.
        function_or_class (Callable): Function or class to wrap.

    Returns:
        Callable: Wrapped function/class.
    """
    # Mutable cell shared with the closure; flips after the first call so
    # the warning is emitted exactly once per wrapper.
    state = {"warned": False}

    def wrapper(*args, **kwargs):  # pragma: no cover
        if not state["warned"]:
            state["warned"] = True
            warnings.warn(message, DeprecationWarning, stacklevel=2)
        return function_or_class(*args, **kwargs)

    return wrapper
def new_name_wrapper(
    old_name: str, new_name: str, function_or_class: Callable
) -> Callable:
    """Wrap a renamed function or class under its old name.

    The first call to the returned wrapper warns that *old_name* is
    deprecated in favour of *new_name*; the wrapper otherwise behaves
    exactly like *function_or_class*.

    Args:
        old_name (str): Old name of function or class. Printed in warning.
        new_name (str): New name of function or class. Printed in warning.
        function_or_class (Callable): Function or class to wrap.

    Returns:
        Callable: Wrapped function/class.
    """
    message = f"{old_name} is deprecated! Use {new_name} instead."
    return deprecation_wrapper(message, function_or_class)
| [
"[email protected]"
] | |
91e61e3f950f46f177a4001f65690a53add7f6f1 | 1765ebc1c393ab4720c5fc5f9397516f5d66cfce | /setup.py | de4fc91885a05f03e2c2bf89d2af47d2323230c1 | [
"MIT"
] | permissive | Lukasa/rproxy | d1db08aa99470c3649258254ead291c6dbd2d202 | c15f9f56608a53db19d4f3737f05dfd02d66bc60 | refs/heads/master | 2020-12-28T22:46:48.225095 | 2016-06-30T08:27:23 | 2016-06-30T08:27:23 | 67,593,070 | 0 | 0 | null | 2016-09-07T09:35:22 | 2016-09-07T09:35:22 | null | UTF-8 | Python | false | false | 502 | py | from setuptools import setup
setup(
    name='rproxy',
    description='A super simple reverse proxy.',
    # NOTE(review): reads README.rst relative to the CWD and leaves the file
    # handle to the GC; fine for a setup.py, but it must be run from the
    # project root.
    long_description=open("README.rst").read(),
    author='Amber Brown',
    author_email='[email protected]',
    packages=['rproxy', 'twisted.plugins'],
    # Sources live under src/ (the "src layout").
    package_dir={"": "src"},
    install_requires=[
        'twisted >= 15.5.0',
        'pyopenssl',
        'txsni',
        'incremental',
    ],
    zip_safe=False,
    # incremental manages the package version (see use_incremental below),
    # so it must be importable at build time too.
    setup_requires=["incremental"],
    use_incremental=True,
    )
| [
"[email protected]"
] | |
3040eece0cb8864c9e7d39ddab4a66343a0f3988 | 2112e4cfd9568128573098f8e209962002f66a23 | /app.py | 23284a6ccf1befdf6ba398a9fa834d9e7048b7e3 | [] | no_license | amazingguni/stock-trader | 0bd39cce6f6462c9648e3c8b2893b3e8379e37ca | 252c9230885200cfde845f2a03677140564cfc62 | refs/heads/main | 2023-05-05T18:26:04.124690 | 2021-05-30T13:12:58 | 2021-05-30T13:12:58 | 362,616,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from flask import Flask, Response
from flask_login import LoginManager
from flask_cors import CORS
from config import get_config_by_env
from container import Container
from web.admin import admin
from mongodb import db
login_manager = LoginManager()
def create_app():
    """Application factory: build, configure, and wire the Flask app.

    Sets up per-environment config, CORS, flask-login, the dependency
    injection container, the admin UI, and the MongoDB binding.
    """
    app = Flask(__name__, template_folder='./web/templates')
    @app.route('/')
    # pylint: disable=unused-variable
    def index():
        # Bare 200 response at the root URL (usable as a liveness check).
        return Response(status=200)
    app.config.from_object(get_config_by_env())
    CORS(app)
    login_manager.init_app(app)
    container = Container()
    # Expose the DI container on the app object for access elsewhere.
    app.container = container
    # Imported here rather than at module top — presumably to avoid circular
    # imports during container wiring; confirm before moving to file level.
    from web.admin.views import sync as admin_sync_views
    from web.admin.views import stock as admin_stock_views
    from web.admin.views import portfolio as admin_portfolio_views
    admin_views = [admin_sync_views,
                   admin_stock_views, admin_portfolio_views, ]
    with app.app_context():
        # Let the container inject dependencies into the admin view modules.
        container.wire(modules=admin_views)
    admin.init_app(app)
    db.init_app(app)
    return app
def register_blueprints(app, views):
    """Attach the ``bp`` blueprint of every module in *views* to *app*."""
    for view_module in views:
        app.register_blueprint(view_module.bp)
@login_manager.user_loader
def load_user(user_id):
from core.user.domain.user import User
return User.query.filter(User.id == user_id).first()
app = create_app()
| [
"[email protected]"
] | |
7493629d6e1853839179716db93c9a7fcec7dbf8 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v5/proto/resources/customer_negative_criterion_pb2.py | a77e1ccec1e3bee71b3e49346e6e67070a3c1c2b | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 14,503 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/resources/customer_negative_criterion.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v5.proto.common import criteria_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2
from google.ads.google_ads.v5.proto.enums import criterion_type_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/resources/customer_negative_criterion.proto',
package='google.ads.googleads.v5.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v5.resourcesB\036CustomerNegativeCriterionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v5/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V5.Resources\312\002!Google\\Ads\\GoogleAds\\V5\\Resources\352\002%Google::Ads::GoogleAds::V5::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nIgoogle/ads/googleads_v5/proto/resources/customer_negative_criterion.proto\x12!google.ads.googleads.v5.resources\x1a\x33google/ads/googleads_v5/proto/common/criteria.proto\x1a\x38google/ads/googleads_v5/proto/enums/criterion_type.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xe5\x06\n\x19\x43ustomerNegativeCriterion\x12Q\n\rresource_name\x18\x01 \x01(\tB:\xe0\x41\x05\xfa\x41\x34\n2googleads.googleapis.com/CustomerNegativeCriterion\x12\x14\n\x02id\x18\n \x01(\x03\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12Q\n\x04type\x18\x03 \x01(\x0e\x32>.google.ads.googleads.v5.enums.CriterionTypeEnum.CriterionTypeB\x03\xe0\x41\x03\x12N\n\rcontent_label\x18\x04 \x01(\x0b\x32\x30.google.ads.googleads.v5.common.ContentLabelInfoB\x03\xe0\x41\x05H\x00\x12X\n\x12mobile_application\x18\x05 \x01(\x0b\x32\x35.google.ads.googleads.v5.common.MobileApplicationInfoB\x03\xe0\x41\x05H\x00\x12Y\n\x13mobile_app_category\x18\x06 \x01(\x0b\x32\x35.google.ads.googleads.v5.common.MobileAppCategoryInfoB\x03\xe0\x41\x05H\x00\x12G\n\tplacement\x18\x07 \x01(\x0b\x32-.google.ads.googleads.v5.common.PlacementInfoB\x03\xe0\x41\x05H\x00\x12N\n\ryoutube_video\x18\x08 \x01(\x0b\x32\x30.google.ads.googleads.v5.common.YouTubeVideoInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0fyoutube_channel\x18\t \x01(\x0b\x32\x32.google.ads.googleads.v5.common.YouTubeChannelInfoB\x03\xe0\x41\x05H\x00:\x85\x01\xea\x41\x81\x01\n2googleads.googleapis.com/CustomerNegativeCriterion\x12Kcustomers/{customer}/customerNegativeCriteria/{customer_negative_criterion}B\x0b\n\tcriterionB\x05\n\x03_idB\x8b\x02\n%com.google.ads.googleads.v5.resourcesB\x1e\x43ustomerNegativeCriterionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v5/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V5.Resources\xca\x02!Google\\Ads\\GoogleAds\\V5\\Resources\xea\x02%Google::Ads::GoogleAds::V5::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CUSTOMERNEGATIVECRITERION = _descriptor.Descriptor(
name='CustomerNegativeCriterion',
full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A4\n2googleads.googleapis.com/CustomerNegativeCriterion', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.id', index=1,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='content_label', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.content_label', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_application', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.mobile_application', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_app_category', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.mobile_app_category', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='placement', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.placement', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_video', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.youtube_video', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_channel', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.youtube_channel', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\201\001\n2googleads.googleapis.com/CustomerNegativeCriterion\022Kcustomers/{customer}/customerNegativeCriteria/{customer_negative_criterion}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='criterion', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion.criterion',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_id', full_name='google.ads.googleads.v5.resources.CustomerNegativeCriterion._id',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=314,
serialized_end=1183,
)
_CUSTOMERNEGATIVECRITERION.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2._CRITERIONTYPEENUM_CRITERIONTYPE
_CUSTOMERNEGATIVECRITERION.fields_by_name['content_label'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._CONTENTLABELINFO
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_application'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._MOBILEAPPLICATIONINFO
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_app_category'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._MOBILEAPPCATEGORYINFO
_CUSTOMERNEGATIVECRITERION.fields_by_name['placement'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._PLACEMENTINFO
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_video'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._YOUTUBEVIDEOINFO
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_channel'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2._YOUTUBECHANNELINFO
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['content_label'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['content_label'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_application'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_application'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_app_category'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_app_category'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['placement'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['placement'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_video'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_video'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_channel'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_channel'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['criterion']
_CUSTOMERNEGATIVECRITERION.oneofs_by_name['_id'].fields.append(
_CUSTOMERNEGATIVECRITERION.fields_by_name['id'])
_CUSTOMERNEGATIVECRITERION.fields_by_name['id'].containing_oneof = _CUSTOMERNEGATIVECRITERION.oneofs_by_name['_id']
DESCRIPTOR.message_types_by_name['CustomerNegativeCriterion'] = _CUSTOMERNEGATIVECRITERION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CustomerNegativeCriterion = _reflection.GeneratedProtocolMessageType('CustomerNegativeCriterion', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMERNEGATIVECRITERION,
'__module__' : 'google.ads.googleads_v5.proto.resources.customer_negative_criterion_pb2'
,
'__doc__': """A negative criterion for exclusions at the customer level.
Attributes:
resource_name:
Immutable. The resource name of the customer negative
criterion. Customer negative criterion resource names have the
form: ``customers/{customer_id}/customerNegativeCriteria/{cri
terion_id}``
id:
Output only. The ID of the criterion.
type:
Output only. The type of the criterion.
criterion:
The customer negative criterion. Exactly one must be set.
content_label:
Immutable. ContentLabel.
mobile_application:
Immutable. MobileApplication.
mobile_app_category:
Immutable. MobileAppCategory.
placement:
Immutable. Placement.
youtube_video:
Immutable. YouTube Video.
youtube_channel:
Immutable. YouTube Channel.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.resources.CustomerNegativeCriterion)
})
_sym_db.RegisterMessage(CustomerNegativeCriterion)
DESCRIPTOR._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['resource_name']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['id']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['type']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['content_label']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_application']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['mobile_app_category']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['placement']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_video']._options = None
_CUSTOMERNEGATIVECRITERION.fields_by_name['youtube_channel']._options = None
_CUSTOMERNEGATIVECRITERION._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
60c586549370e3fbb1ebd8bbe3f0cd9caba71e15 | 3f29503e6d776ef0914217b1c922f4bc78af4fdd | /13.HASH MAP/1338_Reduce Array Size to The Half_MED/solution.py | 8a43eec458a5c93f42b5aa20c4251801a04035a9 | [] | no_license | kimmyoo/python_leetcode | cd4ff3c4f6d190840bbf5fb9acdca2b92554a6fa | 813235789ce422a3bab198317aafc46fbc61625e | refs/heads/master | 2023-08-16T07:36:38.688871 | 2023-08-15T22:38:00 | 2023-08-15T22:38:00 | 132,544,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | class Solution(object):
def minSetSize(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
half = len(arr)/2
d = collections.Counter(arr)
c = d.values()
c.sort(reverse=True)
if max(c) >= half:
return 1
begin, end = 0, 1
sum = max(c)
while True:
sum += c[end]
if sum >= half:
return end+1-begin
else:
end+=1
| [
"[email protected]"
] | |
a2bd66f70a6a7d7e1b9e8f3c16a7f2d37623a9d3 | 7ed4a49fd0906ed156c1846c9c9fc1dd94c47303 | /torch/ao/quantization/_pt2e/quantizer/qnnpack_quantizer.py | 10880f20705bdbe4aa55d8602e9370acf9c72230 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | XinYao1994/pytorch | e052f2f44acf623da9d5ebc6b46f68311e73d66a | 351c2ea2fbb09add93980e2942435f31b114047c | refs/heads/master | 2023-05-27T17:20:51.876780 | 2023-05-21T06:31:53 | 2023-05-21T06:31:53 | 153,064,994 | 1 | 0 | null | 2018-10-15T06:42:28 | 2018-10-15T06:42:28 | null | UTF-8 | Python | false | false | 25,134 | py | from __future__ import annotations
import copy
import functools
import operator
from typing import Callable, Dict, List, Optional, Set
import torch
import torch._dynamo as torchdynamo
import torch.nn.functional as F
from torch.ao.quantization._pt2e.quantizer.utils import (
get_act_obs_or_fq_ctr,
get_bias_obs_or_fq_ctr,
get_weight_obs_or_fq_ctr,
)
from torch.ao.quantization.observer import PlaceholderObserver
from torch.fx import Node
from torch.fx.passes.utils.source_matcher_utils import get_source_partitions
from .quantizer import (
OperatorConfig,
OperatorPatternType,
QuantizationConfig,
QuantizationSpec,
Quantizer,
QuantizationAnnotation,
_annotate_input_qspec_map,
_annotate_output_qspec,
)
# Public API of this module.
__all__ = [
    "QNNPackQuantizer",
    "get_symmetric_quantization_config",
]

# Registry populated by @register_annotator: maps a QuantizationConfig to the
# annotator function partially bound (``config=...``) to that config.
_QUANT_CONFIG_TO_ANNOTATOR = {}
def _mark_nodes_as_annotated(nodes: List[Node]):
for node in nodes:
if node is not None:
if "quantization_annotation" not in node.meta:
node.meta["quantization_annotation"] = QuantizationAnnotation()
node.meta["quantization_annotation"]._annotated = True
def _get_dynamo_graph(function: Callable, inputs) -> torch.fx.Graph:
    """Trace *function* with torchdynamo into an aten-level FX graph.

    *inputs* are example arguments used for tracing; dead code is pruned
    before the graph is returned.
    """
    gm, _ = torchdynamo.export(function, *inputs, aten_graph=True)
    gm.graph.eliminate_dead_code()
    return gm.graph
def _get_linear_patterns(input_size: List[int]):
    """Build FX graph patterns for F.linear, with and without bias.

    Only the input feature dimension (``input_size[-1]``) affects the traced
    pattern shape.
    """
    in_channels = input_size[-1]
    out_channels = 8  # hard coding but this should not matter
    weight = torch.ones((out_channels, in_channels))
    bias = torch.ones((out_channels,))
    act = torch.ones(input_size)

    def linear_op(act, weight, bias=None):
        return F.linear(act, weight, bias)

    pattern_w_bias = _get_dynamo_graph(linear_op, (act, weight, bias))
    pattern_wo_bias = _get_dynamo_graph(linear_op, (act, weight))
    return [pattern_w_bias, pattern_wo_bias]
def register_annotator(quantization_configs: List[QuantizationConfig]):
    """Decorator factory registering a function as the annotator for each
    config in *quantization_configs*.

    Each registry entry is the function partially bound with
    ``config=<that config>``.  Raises KeyError on duplicate registration.
    """
    def decorator(fn: Callable):
        for quantization_config in quantization_configs:
            if quantization_config in _QUANT_CONFIG_TO_ANNOTATOR:
                raise KeyError(
                    f"Annotator for quantization config {quantization_config} is already registered"
                )
            _QUANT_CONFIG_TO_ANNOTATOR[quantization_config] = functools.partial(
                fn, config=quantization_config
            )
        # Bug fix: return the wrapped function so the decorated attribute
        # keeps referring to it instead of being rebound to None.
        return fn

    return decorator
def supported_symmetric_quantized_operators() -> Dict[str, List[OperatorPatternType]]:
    """Return a fresh (deep-copied) mapping from operator name to the
    module/functional patterns this backend can quantize symmetrically."""
    # conv and linear also cover relu/hardtanh fusion, since those are clamp ops.
    conv2d_patterns: List[OperatorPatternType] = [
        [torch.nn.Conv2d, torch.nn.ReLU],
        [torch.nn.Conv2d, F.relu],
        [F.conv2d, torch.nn.ReLU],
        [F.conv2d, F.relu],
    ]
    supported: Dict[str, List[OperatorPatternType]] = {
        "conv2d": conv2d_patterns,
        "linear": [[torch.nn.Linear], [F.linear]],
        "add": [[torch.add]],
        "maxpool2d": [[torch.nn.MaxPool2d], [F.max_pool2d]],
        "hardtanh": [[torch.nn.Hardtanh], [F.hardtanh]],
        "mean": [[torch.mean]],
        "adaptive_avgpool2d": [
            [torch.nn.AdaptiveAvgPool2d],
            [F.adaptive_avg_pool2d],
        ],
    }
    return copy.deepcopy(supported)
def get_supported_symmetric_config_and_operators() -> List[OperatorConfig]:
    """Pair every symmetric config variant with each supported operator
    pattern list, returning a deep copy of the resulting table."""
    symmetric_configs = [
        get_symmetric_quantization_config(),
        get_symmetric_quantization_config(is_qat=True),
        get_symmetric_quantization_config(is_per_channel=True),
        get_symmetric_quantization_config(is_per_channel=True, is_qat=True),
    ]
    pairs = [
        OperatorConfig(config, pattern_list)
        for config in symmetric_configs
        for pattern_list in supported_symmetric_quantized_operators().values()
    ]
    return copy.deepcopy(pairs)
@functools.lru_cache
def get_symmetric_quantization_config(
    is_per_channel: bool = False,
    is_qat: bool = False,
):
    """Build (and memoize) the symmetric int8 QuantizationConfig.

    Activations: per-tensor affine int8 in [-128, 127].
    Weights: symmetric int8 in [-127, 127], per-channel on axis 0 when
    ``is_per_channel`` is set.  Bias stays float.
    """
    if is_per_channel:
        weight_qscheme = torch.per_channel_symmetric
    else:
        weight_qscheme = torch.per_tensor_symmetric

    activation_spec = QuantizationSpec(
        dtype=torch.int8,
        quant_min=-128,
        quant_max=127,
        qscheme=torch.per_tensor_affine,
        is_dynamic=False,
    )
    weight_spec = QuantizationSpec(
        dtype=torch.int8,
        quant_min=-127,
        quant_max=127,
        qscheme=weight_qscheme,
        ch_axis=0,
        is_dynamic=False,
    )
    bias_spec = QuantizationSpec(dtype=torch.float)
    return QuantizationConfig(activation_spec, weight_spec, bias_spec, is_qat)
def get_supported_config_and_operators() -> List[OperatorConfig]:
    """Return the backend's full (config, operator patterns) support table.

    Only symmetric configs are supported for now.
    """
    return get_supported_symmetric_config_and_operators()
def _get_default_obs_or_fq_ctr():
    # Pass-through (float, no-quantization) observer constructor used as a default.
    return PlaceholderObserver.with_args(dtype=torch.float)
def _is_annotated(nodes: List[Node]):
"""
Given a list of nodes (that represents an operator pattern),
check if any of the node is annotated, return True if any of the node
is annotated, otherwise return False
"""
annotated = False
for node in nodes:
annotated = annotated or (
"quantization_annotation" in node.meta
and node.meta["quantization_annotation"]._annotated
)
return annotated
class QNNPackQuantizer(Quantizer):
    """Quantizer that annotates FX graphs with the symmetric int8 configs
    supported by the QNNPACK backend (see ``get_symmetric_quantization_config``)."""

    # Static (config, operator patterns) support table, shared by all instances.
    supported_config_and_operators = get_supported_config_and_operators()

    def __init__(self):
        super().__init__()
        # Config applied to every supported operator; must be set via
        # set_global() before annotate() is called.
        self.global_config: QuantizationConfig = None  # type: ignore[assignment]
        # Per-operator-type overrides (currently stored but not consulted).
        self.operator_type_config: Dict[str, Optional[QuantizationConfig]] = {}
@classmethod
def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
op_configs: Set[QuantizationConfig] = set({})
for spec, _ in cls.supported_config_and_operators:
op_configs.add(spec)
return list(op_configs)
@classmethod
def get_supported_operator_for_quantization_config(
cls, quantization_config: Optional[QuantizationConfig]
) -> List[OperatorPatternType]:
if quantization_config is None:
all_ops = []
for _, ops in cls.supported_config_and_operators:
all_ops.extend(ops)
return all_ops
for config, ops in cls.supported_config_and_operators:
# note: this assumes each entry in cls.supported_spec_and_operators
# corresponds to one spec, e.g. we don't have
# [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)]
# where the first and second entry have the same spec but did not
# merge the op list
if config == quantization_config:
return ops
return []
    def set_global(self, quantization_config: QuantizationConfig) -> QNNPackQuantizer:
        """Set the config used for all supported operators; returns ``self`` for chaining."""
        self.global_config = quantization_config
        return self
    def set_config_for_operator_type(
        self, operator_type: str, quantization_config: QuantizationConfig
    ) -> QNNPackQuantizer:
        """Record a per-operator-type override; returns ``self`` for chaining.

        NOTE(review): the override is stored but never read by ``annotate``
        yet -- only the global config is consulted.
        """
        self.operator_type_config[operator_type] = quantization_config
        return self
    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        """Annotate *model* in place and return it.

        Only the global config is handled for now; per-operator-type
        configs are not yet consulted.
        """
        global_config = self.global_config
        # Dispatch to the annotator registered (via @register_annotator) for
        # this exact config; a dict lookup miss raises KeyError.
        _QUANT_CONFIG_TO_ANNOTATOR[global_config](self, model)
        return model
    @register_annotator(
        [
            get_symmetric_quantization_config(is_per_channel=False, is_qat=False),
            get_symmetric_quantization_config(is_per_channel=False, is_qat=True),
            get_symmetric_quantization_config(is_per_channel=True, is_qat=True),
            get_symmetric_quantization_config(is_per_channel=True, is_qat=False),
        ]
    )
    def annotate_symmetric_config(
        self, model: torch.fx.GraphModule, config: QuantizationConfig
    ) -> torch.fx.GraphModule:
        """Annotator shared by all four symmetric config variants."""
        # annotate the nodes from last to first since the matching is in the reversed order
        # and fusion operator patterns (conv - relu) can get matched before single operator pattern (conv)
        # and we will mark the matched node with "_annoated" so fusion operator pattern
        # can take precedence over single operator pattern in this way
        self._annotate_linear(model, config)
        for node in reversed(model.graph.nodes):
            # one improvement is to register node annotators for each
            # supported op type.
            if config.is_qat:
                # batchnorm patterns only exist before fusion, i.e. in QAT
                self._annotate_conv2d_bn_relu(node, config)
                self._annotate_conv2d_bn(node, config)
            self._annotate_conv2d_relu(node, config)
            self._annotate_conv2d(node, config)
            self._annotate_maxpool2d(node, config)
            self._annotate_add_relu(node, config)
            self._annotate_add(node, config)
            self._annotate_hardtanh(node, config)
            self._annotate_mean(node, config)
            self._annotate_adaptive_avg_pool2d(node, config)
        return model
    def _annotate_conv2d_bn(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        """
        Match the following pattern:
            ... -> conv -> bn -> getitem[0] -> ...
        Annotate it to get the following pattern after prepare:
            weight -> fq1
                       |
            ... -> fq0 -> conv -> bn -> getitem[0] -> fq2 -> ...
        Note: This is only used for QAT. In PTQ, batchnorm should already be
        fused into the conv.
        """
        # Matching walks backwards from the getitem node: getitem[0] <- bn <- conv.
        if (
            node.op != "call_function"
            or node.target != operator.getitem
            or node.args[1] != 0
        ):
            return
        getitem_node = node
        bn_node = getitem_node.args[0]
        assert isinstance(bn_node, Node)
        if (
            bn_node.op != "call_function"
            or bn_node.target != torch.ops.aten._native_batch_norm_legit.default
        ):
            return
        conv_node = bn_node.args[0]
        assert isinstance(conv_node, Node)
        if (
            conv_node.op != "call_function"
            or conv_node.target != torch.ops.aten.convolution.default
        ):
            return
        # A previous (larger) fusion match may already have claimed these nodes.
        if _is_annotated([getitem_node, bn_node, conv_node]):
            return

        # Conv inputs: activation, weight, and (optional) bias each get their
        # own observer / fake-quant constructor.
        input_qspec_map = {}
        input_act = conv_node.args[0]
        assert isinstance(input_act, Node)
        input_qspec_map[input_act] = get_act_obs_or_fq_ctr(quantization_config)

        weight = conv_node.args[1]
        assert isinstance(weight, Node)
        input_qspec_map[weight] = get_weight_obs_or_fq_ctr(quantization_config)

        bias = conv_node.args[2]
        if isinstance(bias, Node):
            input_qspec_map[bias] = get_bias_obs_or_fq_ctr(quantization_config)

        conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map=input_qspec_map,
            _annotated=True
        )
        bn_node.meta["quantization_annotation"] = QuantizationAnnotation(
            _annotated=True
        )
        # The output observer goes on the end of the pattern (the getitem node).
        getitem_node.meta["quantization_annotation"] = QuantizationAnnotation(
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),  # type: ignore[arg-type]
            _annotated=True
        )
    def _annotate_conv2d_bn_relu(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        """
        Match the following pattern:
            ... -> conv -> bn -> getitem[0] -> relu -> ...
        Annotate it to get the following pattern after prepare:
            weight -> fq1
                       |
            ... -> fq0 -> conv -> bn -> getitem[0] -> relu -> fq2 -> ...
        Note: This is only used for QAT. In PTQ, batchnorm should already be
        fused into the conv.
        """
        # Matching walks backwards: relu <- getitem[0] <- bn <- conv.
        if node.op != "call_function" or node.target not in [
            torch.ops.aten.relu_.default,
            torch.ops.aten.relu.default,
        ]:
            return
        relu_node = node
        getitem_node = relu_node.args[0]
        assert isinstance(getitem_node, Node)
        if (
            getitem_node.op != "call_function"
            or getitem_node.target != operator.getitem
            or getitem_node.args[1] != 0
        ):
            return
        bn_node = getitem_node.args[0]
        assert isinstance(bn_node, Node)
        if (
            bn_node.op != "call_function"
            or bn_node.target != torch.ops.aten._native_batch_norm_legit.default
        ):
            return
        conv_node = bn_node.args[0]
        assert isinstance(conv_node, Node)
        if (
            conv_node.op != "call_function"
            or conv_node.target != torch.ops.aten.convolution.default
        ):
            return
        if _is_annotated([relu_node, getitem_node, bn_node, conv_node]):
            return

        # Conv inputs: activation, weight, and (optional) bias.
        input_qspec_map = {}
        input_act = conv_node.args[0]
        assert isinstance(input_act, Node)
        input_qspec_map[input_act] = get_act_obs_or_fq_ctr(quantization_config)

        weight = conv_node.args[1]
        assert isinstance(weight, Node)
        input_qspec_map[weight] = get_weight_obs_or_fq_ctr(quantization_config)

        bias = conv_node.args[2]
        if isinstance(bias, Node):
            input_qspec_map[bias] = get_bias_obs_or_fq_ctr(quantization_config)

        conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map=input_qspec_map,
            _annotated=True
        )
        bn_node.meta["quantization_annotation"] = QuantizationAnnotation(
            _annotated=True
        )
        getitem_node.meta["quantization_annotation"] = QuantizationAnnotation(
            _annotated=True
        )
        # The output observer goes after the relu, the end of the fused pattern.
        relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),  # type: ignore[arg-type]
            _annotated=True
        )
    def _annotate_conv2d_relu(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        """Annotate a fused conv -> relu pattern, matched backwards from the relu."""
        if node.op != "call_function" or node.target not in [
            torch.ops.aten.relu_.default,
            torch.ops.aten.relu.default,
        ]:
            return
        relu_node = node
        conv_node = relu_node.args[0]
        assert isinstance(conv_node, Node)
        if (
            conv_node.op != "call_function"
            or conv_node.target != torch.ops.aten.convolution.default
        ):
            return
        if _is_annotated([relu_node, conv_node]):
            return

        # Conv inputs: activation, weight, and (optional) bias.
        input_qspec_map = {}
        input_act = conv_node.args[0]
        assert isinstance(input_act, Node)
        input_qspec_map[input_act] = get_act_obs_or_fq_ctr(quantization_config)

        weight = conv_node.args[1]
        assert isinstance(weight, Node)
        input_qspec_map[weight] = get_weight_obs_or_fq_ctr(quantization_config)

        bias = conv_node.args[2]
        if isinstance(bias, Node):
            input_qspec_map[bias] = get_bias_obs_or_fq_ctr(quantization_config)

        conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map=input_qspec_map,
            _annotated=True
        )
        # The output observer goes after the relu (end of the fused pattern).
        relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),  # type: ignore[arg-type]
            _annotated=True
        )
    def _annotate_conv2d(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        """Annotate a standalone aten.convolution node (no fused activation)."""
        conv_node = node
        if (
            conv_node.op != "call_function"
            or conv_node.target != torch.ops.aten.convolution.default
        ):
            return
        # skip annotation if it is already annotated (e.g. by a fusion pattern)
        if _is_annotated([conv_node]):
            return

        # Conv inputs: activation, weight, and (optional) bias.
        input_qspec_map = {}
        input_act = conv_node.args[0]
        assert isinstance(input_act, Node)
        input_qspec_map[input_act] = get_act_obs_or_fq_ctr(quantization_config)

        weight = conv_node.args[1]
        assert isinstance(weight, Node)
        input_qspec_map[weight] = get_weight_obs_or_fq_ctr(quantization_config)

        bias = conv_node.args[2]
        if isinstance(bias, Node):
            input_qspec_map[bias] = get_bias_obs_or_fq_ctr(quantization_config)

        conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map=input_qspec_map,
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),
            _annotated=True
        )
    def _annotate_linear(
        self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
    ) -> None:
        """Annotate every nn.Linear partition found in *gm* via source matching.

        NOTE(review): F.linear partitions are also matched below but the
        ``module_or_fn_type == torch.nn.Linear`` guard skips them -- confirm
        whether functional linear should be handled too.
        """
        module_partitions = get_source_partitions(
            gm.graph, [torch.nn.Linear, torch.nn.functional.linear]
        )
        for module_or_fn_type, partitions in module_partitions.items():
            if module_or_fn_type == torch.nn.Linear:
                for p in partitions:
                    act_node = p.input_nodes[0]
                    output_node = p.output_nodes[0]
                    weight_node = None
                    bias_node = None
                    # Distinguish weight (2-D) from bias (1-D) among the
                    # partition's parameters.
                    for node in p.params:
                        weight_or_bias = getattr(gm, node.target)  # type: ignore[arg-type]
                        if weight_or_bias.ndim == 2:  # type: ignore[attr-defined]
                            weight_node = node
                        if weight_or_bias.ndim == 1:  # type: ignore[attr-defined]
                            bias_node = node
                    if weight_node is None:
                        raise ValueError("No weight found in Linear pattern")
                    # find use of act node within the matched pattern
                    act_use_node = None
                    for node in p.nodes:
                        if node in act_node.users:  # type: ignore[union-attr]
                            act_use_node = node
                            break
                    if act_use_node is None:
                        raise ValueError(
                            "Could not find an user of act node within matched pattern."
                        )
                    # Annotate activation, bias, weight and output -- each
                    # only if not already claimed by another pattern.
                    if _is_annotated([act_use_node]) is False:  # type: ignore[list-item]
                        _annotate_input_qspec_map(
                            act_use_node,
                            act_node,
                            get_act_obs_or_fq_ctr(quantization_config),
                        )
                    if bias_node and _is_annotated([bias_node]) is False:
                        _annotate_output_qspec(
                            bias_node, get_bias_obs_or_fq_ctr(quantization_config)
                        )
                    if _is_annotated([weight_node]) is False:  # type: ignore[list-item]
                        _annotate_output_qspec(
                            weight_node, get_weight_obs_or_fq_ctr(quantization_config)
                        )
                    if _is_annotated([output_node]) is False:
                        _annotate_output_qspec(
                            output_node, get_act_obs_or_fq_ctr(quantization_config)
                        )
                    nodes_to_mark_annotated = list(p.nodes)
                    _mark_nodes_as_annotated(nodes_to_mark_annotated)
    # TODO: move to `_pt2e/_propagate_annotation.py` after we have
    # decided on the how we want to use pattern matching for annotation
    def _annotate_maxpool2d(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        """Annotate max_pool2d_with_indices -> getitem[0]; input and output
        share observers (``_input_output_share_observers``)."""
        if (
            node.op != "call_function"
            or node.target != operator.getitem
            or node.args[1] != 0
        ):
            return
        getitem_node = node
        maxpool_node = getitem_node.args[0]
        assert isinstance(maxpool_node, Node)
        if (
            maxpool_node.op != "call_function"
            or maxpool_node.target != torch.ops.aten.max_pool2d_with_indices.default
        ):
            return
        if _is_annotated([getitem_node, maxpool_node]):
            return

        input_act = maxpool_node.args[0]
        assert isinstance(input_act, Node)
        maxpool_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map={
                input_act: get_act_obs_or_fq_ctr(quantization_config)
            },
            _annotated=True,
        )
        getitem_node.meta["quantization_annotation"] = QuantizationAnnotation(
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),
            _input_output_share_observers=True,
            _annotated=True,
        )
    def _annotate_input_out_obs_sharing_op(
        self,
        op: Callable,
        node: Node,
        quantization_config: QuantizationConfig,
    ) -> None:
        """Annotate a single-input call to *op* whose output reuses the input
        observer (``_input_output_share_observers=True``)."""
        io_obs_sharing_node = node
        if (
            io_obs_sharing_node.op != "call_function"
            or io_obs_sharing_node.target != op
        ):
            return
        if _is_annotated([io_obs_sharing_node]):
            return

        input_act = io_obs_sharing_node.args[0]
        assert isinstance(input_act, Node)
        io_obs_sharing_node.meta["quantization_annotation"] = QuantizationAnnotation(
            input_qspec_map={
                input_act: get_act_obs_or_fq_ctr(quantization_config)
            },
            output_qspec=get_act_obs_or_fq_ctr(quantization_config),
            _input_output_share_observers=True,
            _annotated=True,
        )
    def _annotate_hardtanh(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        # hardtanh shares one observer between its input and output.
        self._annotate_input_out_obs_sharing_op(
            torch.ops.aten.hardtanh.default, node, quantization_config
        )
    def _annotate_mean(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        # Cover both overloads: full reduction (mean.default) and per-dim (mean.dim).
        self._annotate_input_out_obs_sharing_op(
            torch.ops.aten.mean.default, node, quantization_config
        )
        self._annotate_input_out_obs_sharing_op(
            torch.ops.aten.mean.dim, node, quantization_config
        )
    def _annotate_adaptive_avg_pool2d(
        self, node: Node, quantization_config: QuantizationConfig
    ) -> None:
        # Adaptive average pooling shares one observer between input and output.
        self._annotate_input_out_obs_sharing_op(
            torch.ops.aten.adaptive_avg_pool2d.default, node, quantization_config
        )
def _annotate_add_relu(
self, node: Node, quantization_config: QuantizationConfig
) -> None:
if node.op != "call_function" or node.target not in [
torch.ops.aten.relu_.default,
torch.ops.aten.relu.default,
]:
return
relu_node = node
add_node = relu_node.args[0]
assert isinstance(add_node, Node)
if add_node.op != "call_function" or add_node.target not in [
torch.ops.aten.add.Tensor,
torch.ops.aten.add_.Tensor,
]:
return
if _is_annotated([relu_node, add_node]):
return
input_qspec_map = {}
input_act0 = add_node.args[0]
if isinstance(input_act0, Node):
input_qspec_map[input_act0] = get_act_obs_or_fq_ctr(quantization_config)
input_act1 = add_node.args[1]
if isinstance(input_act1, Node):
input_qspec_map[input_act1] = get_act_obs_or_fq_ctr(quantization_config)
add_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
_annotated=True,
)
relu_node.meta["quantization_annotation"] = QuantizationAnnotation(
output_qspec=get_act_obs_or_fq_ctr(quantization_config),
_annotated=True,
)
def _annotate_add(
self, node: Node, quantization_config: QuantizationConfig
) -> None:
add_node = node
if add_node.op != "call_function" or add_node.target not in [
torch.ops.aten.add.Tensor,
torch.ops.aten.add_.Tensor,
]:
return
if _is_annotated([add_node]):
return
input_qspec_map = {}
input_act0 = add_node.args[0]
if isinstance(input_act0, Node):
input_qspec_map[input_act0] = get_act_obs_or_fq_ctr(quantization_config)
input_act1 = add_node.args[1]
if isinstance(input_act1, Node):
input_qspec_map[input_act1] = get_act_obs_or_fq_ctr(quantization_config)
add_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map=input_qspec_map,
output_qspec=get_act_obs_or_fq_ctr(quantization_config),
_annotated=True,
)
    def validate(self, model: torch.fx.GraphModule) -> None:
        """Post-annotation validation hook; this quantizer performs no checks."""
        pass
    @classmethod
    def get_supported_operators(cls) -> List[OperatorConfig]:
        # Static table of (quantization config, operator) pairs this
        # quantizer claims to support, defined on the class.
        return cls.supported_config_and_operators
| [
"[email protected]"
] | |
3dcb6a3fba7b2c2c8998314caf270f7dc4b3d69c | 824f19d20cdfa26c607db1ff3cdc91f69509e590 | /TopInterviewQuestions/LinkedList/01-Delete-Node.py | e8cc4b8ff1bfc6f65dfa58aa9f76058738818e2a | [] | no_license | almamuncsit/LeetCode | 01d7e32300eebf92ab54c983de6e183242b3c985 | 17aa340649574c37067ec170ceea8d9326be2d6a | refs/heads/master | 2021-07-07T09:48:18.069020 | 2021-03-28T11:26:47 | 2021-03-28T11:26:47 | 230,956,634 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, node):
        """Delete *node* (guaranteed not to be the tail) from its singly
        linked list, given access only to that node.

        Improvement: the original shifted every subsequent value one node
        forward and truncated the last node — O(n).  The standard O(1)
        trick copies the successor's value into *node* and unlinks the
        successor; the traversed value sequence is identical.
        """
        node.val = node.next.val
        node.next = node.next.next
| [
"[email protected]"
] | |
dfca92b9a02a0b34ddb02223c46fc05f0ac34303 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/problems/434.number-of-segments-in-a-string.py | 229c119643f381afef999ff54714d595c048b7dc | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #
# @lc app=leetcode id=434 lang=python3
#
# [434] Number of Segments in a String
#
# https://leetcode.com/problems/number-of-segments-in-a-string/description/
#
# algorithms
# Easy (37.34%)
# Total Accepted: 64.1K
# Total Submissions: 171.8K
# Testcase Example: '"Hello, my name is John"'
#
# Count the number of segments in a string, where a segment is defined to be a
# contiguous sequence of non-space characters.
#
# Please note that the string does not contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
#
#
#
class Solution:
    def countSegments(self, s: str) -> int:
        """Return the number of segments (maximal runs of non-space
        characters) in *s*.

        Fix: the original method body was missing entirely (a bare stub).
        ``str.split()`` with no argument splits on runs of whitespace and
        drops leading/trailing whitespace, so an empty or all-space string
        correctly yields 0.
        """
        return len(s.split())
| [
"[email protected]"
] | |
1ac603767f5fde5c05e1576e3f1e35df16a53af1 | 63e0bc889563192a602463e662121058a4da30b5 | /Smart.py | 4c26367cb520a58c7477ccfe077c736086992b6b | [] | no_license | adaptiveUK/rhinopythonscripts | 11accd3048caad7b69024db55e5847acb3c7feb6 | bdd260e7f7257de54a4fac25a465dcdafff3b68c | refs/heads/master | 2021-01-17T08:38:26.863981 | 2011-07-25T19:07:29 | 2011-07-25T19:07:29 | 3,888,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | '''A module for wrapping geometry with UserString and Attribute Dictionaries'''
import Rhino
class SmartFeature(object):
    """Wraps a piece of Rhino geometry together with a key/value attribute
    dictionary (mirroring the Rhino object's UserStrings)."""

    def __init__(self, rhinoObjectOrTuple):
        self._parseConstructor(rhinoObjectOrTuple)

    def _parseConstructor(self, rhinoObjectOrTuple):
        """Accept either a (geometry, attributes-dict) pair or a RhinoObject."""
        # determine if it is a tuple
        kind = type(rhinoObjectOrTuple)
        if kind == tuple or kind == list:
            # build from geom, user string pair
            pair = rhinoObjectOrTuple
            # BUG FIX: this path called the non-existent ``self._filterGeometry``
            # while the method is actually named ``_filterGeom``, so every
            # tuple/list construction raised AttributeError.
            self.geom = self._filterGeom(pair[0])  # geometry
            self.attributes = pair[1]  # properties (as dictionary)
        else:  # assume RhinoObject
            rhObj = rhinoObjectOrTuple
            self.geom = self._filterGeom(rhObj.Geometry)
            self.attributes = {}
            numAtts = rhObj.Attributes.UserStringCount  # NOTE(review): unused
            rawAtts = rhObj.Attributes.GetUserStrings()
            keys = rawAtts.AllKeys
            for key in keys:
                self.attributes[key] = rhObj.Attributes.GetUserString(key)

    def _filterGeom(self, geometry):
        # Rhino Point objects are reduced to their Point3d location; any
        # other geometry passes through untouched.
        if type(geometry) == Rhino.Geometry.Point:
            return geometry.Location
        else:
            return geometry

    def objAttributes(self, objectAttributes):
        """Copy this feature's attributes onto a Rhino ObjectAttributes
        instance as UserStrings and return it."""
        for key in self.attributes:
            objectAttributes.SetUserString(key, self.attributes[key])
        return objectAttributes
def RhinoObjectsToSmartFeatures(RhinoObjectList):
    '''Wrap every RhinoObject in the list as a SmartFeature.'''
    features = []
    for rhinoObject in RhinoObjectList:
        features.append(SmartFeature(rhinoObject))
    return features
def replaceGeometries(smartFeatures, geometries):
    '''Assign geometries[i] onto smartFeatures[i].geom and return the
    (mutated) features as a new list.'''
    updated = []
    for index, feature in enumerate(smartFeatures):
        feature.geom = geometries[index]
        updated.append(feature)
    return updated
| [
"[email protected]"
] | |
c8aa00a8afba3954be9744854afed97a99745d3f | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/codewar/_Codewars-Solu-Python-master/src/kyu7_Linked_Lists-Move_Node.py | 4367bf1aeaa9de6050ecb664223c5ff2f974bf3a | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,573 | py | class Node(object):
def __init__(self, data):
self.data = data
self.next = None
class Context(object):
    # Simple holder for the state of the two linked lists (the kata's
    # move_node returns one of these with both mutated heads).
    def __init__(self, source, dest):
        self.source = source
        self.dest = dest
class Solution():
    """Codewars kata: Linked Lists - Move Node.

    https://www.codewars.com/kata/linked-lists-move-node

    ``move_node_01`` takes the node from the front of the source list and
    moves it to the front of the destination list, returning a ``Context``
    holding both mutated lists.  An empty source raises ``ValueError``.

        source = 1 -> 2 -> 3 -> None
        dest   = 4 -> 5 -> 6 -> None
        move_node(source, dest).source == 2 -> 3 -> None
        move_node(source, dest).dest   == 1 -> 4 -> 5 -> 6 -> None
    """
    def __init__(self):
        pass
    def move_node_01(self, source, dest):
        """Pop the head of *source* and prepend it to *dest*."""
        if not source:
            raise ValueError
        moved = source
        # Advance source past its old head, then splice that head onto dest.
        source = moved.next
        moved.next = dest
        return Context(source, moved)
"[email protected]"
] | |
c3e539c4bf3ff081920dd8d7384b3aab42f9f2aa | c65d512975feed7dfe74f1117cdd1337293d9d60 | /python/my_py_notes_万物皆对象/db_and_数据持久化/Mysql/mysql与python交互/py_mySQL.py | 457399daaab72146341157a493068ef91fee16ba | [] | no_license | Rockyzsu/StudyRepo | e5c6420e325917c2df7dc51d606be5fa3c2ee1b8 | 385785c09bebb56df156fd149a088043f38d0aab | refs/heads/master | 2022-12-09T13:45:38.332899 | 2020-09-15T09:56:09 | 2020-09-15T09:56:09 | 295,388,871 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # -*- coding:utf-8 -*-
# file: PyMySQL.py
#
import MySQLdb # 导入MySQLdb模块
db = MySQLdb.connect(host='localhost', # 连接到数据库,服务器为本机
user='root', # 用户为root
passwd='root654321', # 密码为root654321
db='python') # 数据库名为python
cur = db.cursor() # 获得数据库游标
cur.execute('insert into people (name,age,sex) values (\'Jee\',21,\'F\')') # 执行SQL语句
r = cur.execute('delete from people where age=20') # 执行SQL语句
r = cur.execute('select * from people') # 执行SQL语句
db.commit() # 提交事务
r = cur.fetchall() # 获取数据
print(r) # 输出数据
cur.close() # 关闭游标
db.close() # 关闭数据库连接 | [
"[email protected]"
] | |
43fddc8268d67792feed4cbae6473c1f9b58a178 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_210/72.py | 8b87687f01a56f0ea2dfa277b816f22c349a561c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | #!/usr/bin/env python
import sys
import numpy as np
T = int(raw_input())
for t in xrange(T):
# solve the input
Ac, Aj = raw_input().strip().split()
Ac = int(Ac)
Aj = int(Aj)
activities = [] # start, end, duration, who
ctotal = 0
jtotal = 0
C = np.zeros(Ac)
D = np.zeros(Ac)
for c in xrange(Ac):
cc, dc = raw_input().strip().split()
C[c] = int(cc)
D[c] = int(dc)
activities.append((C[c], D[c], D[c]-C[c], "C"))
ctotal += D[c] - C[c]
J = np.zeros(Aj)
K = np.zeros(Aj)
for j in xrange(Aj):
jj, kj = raw_input().strip().split()
J[j] = int(jj)
K[j] = int(kj)
activities.append((J[j], K[j], K[j]-J[j], "J"))
jtotal += K[j] - J[j]
activities.sort()
cremain = 720 - ctotal
jremain = 720 - jtotal
result = 0
if Aj + Ac == 1:
# only one activity, so only two changes
print "Case #{0}: {1}".format(t+1, 2)
continue
# at least two activities, so at least two proper gaps, and first != last
### FIND ALL GAPS AND THEIR TYPES, FIRST AND LAST
first = activities[0][3]
last = activities[-1][3]
gaps = []
prev = activities[0]
for i in xrange(1, len(activities)):
curr = activities[i]
gaps.append((prev[3] + curr[3], curr[0] - prev[1])) # type of gap, length of gap
prev = curr
# add the last gap
gaps.append((last + first, (1440 - activities[-1][1]) + activities[0][0]))
# sort the gaps, then deal with them separately
gaps.sort()
# start with CC gaps
i = 0
while i < len(gaps) and gaps[i][0] == 'CC':
gap = gaps[i]
if gap[1] <= cremain:
# gap is filled in
cremain -= gap[1]
else:
# cannot fill in the gap, so need 2 changes
result += 2
i += 1
# deal with CJ and JC gaps
while i < len(gaps) and (gaps[i][0] == 'CJ' or gaps[i][0] == 'JC'):
result += 1
i += 1
# deal with JJ gaps
while i < len(gaps) and gaps[i][0] == 'JJ':
gap = gaps[i]
if gap[1] <= jremain:
# gap is filled in
jremain -= gap[1]
else:
# cannot fill in the gap, so need 2 changes
result += 2
i += 1
# this is hopefully all
print "Case #{0}: {1}".format(t+1, result)
| [
"[email protected]"
] | |
d335c1953908c703540fee6892011ac539fd127d | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210831114343.py | 1bb3ad9fa1d068428284e79f3e43f05e552a0685 | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,536 | py | class Nomina:
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr="",email="",estado="",profe="",dep=""):#3
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
self.correo=email
self.estadocivil=estado
self.profesion=profe
self.departamento=dep
class Empresa(Nomina):
    """Company data: interactive capture from stdin plus a formatted
    summary printout."""

    def datosEmpresa(self):
        # Interactively fill in the company fields from stdin.
        self.nombre = input("Ingresar nombre de la empresa: ")
        self.ruc = int(input("Ingresar ruc de la empresa: "))
        self.direccion = input("Ingresar la direccion de la empresa: ")
        self.telefono = int(input("Ingresar el numero de telefono de la empresa: "))
        self.ciudad = input("Ingresar ciudad donde esta la empresa: ")
        self.tipoEmpresa = input("Ingresar tipo de empresa publica o privada: ")

    def mostrarEmpresa(self):
        # BUG FIX: the original format string embedded unescaped double
        # quotes inside a double-quoted literal (SyntaxError).  The inner
        # quotes are now escaped; the printed text is otherwise unchanged.
        print("Datos de la Empresa")
        print("La empresa \"{}\"\n De RUC #{} \n Está ubicada en {}\n"
              " Se puede comunicar al #{}\n"
              " Está empresa esta en la ciudad de \"{}\"\n"
              " Es una entidad \"{}\"".format(
                  self.nombre, self.ruc, self.direccion,
                  self.telefono, self.ciudad, self.tipoEmpresa))
class Departamento(Nomina):
    """Department assignment for an employee.

    BUG FIX: the original inherited from ``Empleado``, which is defined
    *after* this class in the file, so importing the module raised
    NameError.  Only the ``departamento`` attribute is used here and
    ``Nomina`` already provides it, so Nomina is a safe base class.
    """

    def departa(self):
        self.departamento = input(
            "Ingresar el departamento al que pertenece el empleado: ")

    def mostrarDeparta(self):
        # BUG FIX: the original print mixed unescaped double quotes inside
        # a double-quoted literal (SyntaxError); quotes are now escaped.
        print("El empleado pertenece al departamento de: \"{}\"".format(
            self.departamento))
class Empleado(Nomina):
    """Employee data capture: common fields plus worker (obrero) or
    office (oficina) specific details."""

    def empleado(self):
        # Interactively fill in the common employee fields from stdin.
        self.nombre = input("Ingresar nombre del empleado: ")
        self.cedula = int(input("Ingresar numero de cedula: "))
        self.direccion = input("Ingresar la direccion del empleado: ")
        self.telefono = int(input("Ingresar numero de contacto del empleado: "))
        self.correo = input("Ingresar correo personal del empleado: ")

    def empleadoObrero(self):
        self.estadocivil = input("Ingresar estado civil del empleado: ")

    def empleadoOficina(self):
        self.profesion = input("Ingresar profesion del empleado: ")

    def mostrarempleado(self):
        # BUG FIX: the original read a module-global ``eleccion`` that is
        # never defined at module scope (NameError when called) and printed
        # an unfilled template string.  Report the employee name and then
        # whichever detail field is populated instead.
        print("El empleado: {}".format(self.nombre))
        if self.estadocivil:
            print(self.estadocivil)
        elif self.profesion:
            print(self.profesion)
# class Pagos():
# def __init__(self):
# pass
# def pagoNormal(self, valhora,hoesti,hotraba, desc, desper):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# def pagoExtra(self, valhora,hoesti,hotraba,incentivos):
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.bono=incentivos
# def Nomina(self, nom, valhora,hoesti,hotraba, desc, desper,incentivos):#faltan 8 atributos incluir cosas del empleado y sobretiempo
# self.nombre= nom
# self.valorhora=valhora
# self.horaestimada=hoesti
# self.horastrabajadas=hotraba
# self.descuentos=desc
# self.permisos=desper
# self.bono=incentivos
# Module-level demo driver: runs at import time, prompting on stdin for the
# company data and printing the summary.  NOTE(review): consider wrapping in
# ``if __name__ == "__main__":`` so importing this module does not block on
# input().
nom=Nomina()
emp=Empresa()
emp.datosEmpresa()
emp.mostrarEmpresa()
# emple=Empleado()
# emple.empleado()
# eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
# if eleccion==1:
# emple.empleadoObrero()
# elif eleccion==2:
# emple.empleadoOficina()
# else:
# print("No selecciono el tipo de empleado")
# emple.mostrarempleado() | [
"[email protected]"
] | |
2af5c8223fc344d1baaffd129038607c4fdce3a2 | 9d4c84a3ddee3c53bfb85b7e576be97f727caca0 | /iso_two_param/optimization_blue_cv04.py | a998f3794d796a338806c045f2957cc9bb25c077 | [
"MIT"
] | permissive | cjekel/inv_bubble_opt | 386cca2c009bf1c97007c14bcbf9cd5df482afe3 | 3ecd57ee91ee09ac38319d151adbd8e9c3b9a1bb | refs/heads/master | 2023-08-15T04:26:09.702395 | 2021-08-14T01:10:49 | 2021-08-14T01:10:49 | 166,838,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | # MIT License
# Copyright (c) 2019 Charles Jekel
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import invbubble
import os
from scipy.optimize import fmin_l_bfgs_b
if __name__ == "__main__":
    invbubble.delete_files()
    # load the test data
    homeuser = os.path.expanduser('~')
    blue00 = np.load(os.path.join(homeuser, 'blue00.npy'),
                     allow_pickle=True)
    blue01 = np.load(os.path.join(homeuser, 'blue01_rotated_90.npy'),
                     allow_pickle=True)
    blue02 = np.load(os.path.join(homeuser, 'blue02_rotated_90.npy'),
                     allow_pickle=True)
    blue03 = np.load(os.path.join(homeuser, 'blue03.npy'),
                     allow_pickle=True)
    # NOTE(review): blue03 is loaded but not included in test_data below.
    test_data = [blue00, blue01, blue02]
    # initialize a maximum objective value
    max_obj = 30.0  # mm
    opt_hist_file = 'iso04r00.csv'
    header = ['E', 'G', 'OBJ', 'Success']
    # Two-parameter isotropic material model; equal weights on the three
    # held-in test datasets.
    my_opt = invbubble.BubbleOpt(opt_hist_file, header, max_obj,
                                 None, None,
                                 test_data=test_data,
                                 weights=[1.0, 1.0, 1.0],
                                 mat_model='iso-two')
    np.random.seed(121)
    # Box bounds on the two material parameters (E-like, G-like).
    my_bounds = np.zeros((2, 2))
    my_bounds[0, 0] = 0.12
    my_bounds[0, 1] = 0.25
    my_bounds[1, 0] = 0.2
    my_bounds[1, 1] = 0.9
    # Five starting points for the multi-start L-BFGS-B runs.
    X = np.array([[0.166, 0.60],
                  [0.155, 0.52],
                  [0.193, 0.67],
                  [0.167, 0.56],
                  [0.198, 0.7]])
    xres = np.zeros_like(X)
    fres = np.zeros(5)
    for i, x0 in enumerate(X):
        # Gradient-free (finite-difference) bounded minimization.
        res = fmin_l_bfgs_b(my_opt.calc_obj_function_test_data, x0,
                            approx_grad=True, bounds=my_bounds, factr=1e12,
                            pgtol=1e-06, epsilon=1e-3, iprint=1, m=10000,
                            maxfun=200, maxiter=10, maxls=20)
        xres[i] = res[0]
        fres[i] = res[1]
    # find the best result
    best_ind = np.argmin(fres)
    message = '\nBest result: \n' + str(fres[best_ind]) + """\n
    Best values: \n""" + str(xres[best_ind]) + """\n
    The full result: \n""" + str(fres) + """\n
    Full values: \n""" + str(xres)
    print(message)
    invbubble.send_email('[email protected]', 'iso blue cv 04 done', message)
| [
"[email protected]"
] | |
fee00f670adab1b0c03e332059c2a4409748e8a6 | 9b483d42da47237d28a9f80c378aba412b89f5b1 | /special/cookie.py | 59a4301e436c65e01d0d8954723bc8de92163246 | [] | no_license | smartree/Tentacle | b0c83b671c1abe26338125d672d77b277abd2b28 | a53e046f3434bf1ac4b606ba7dfe951d9b7f5464 | refs/heads/master | 2020-05-23T01:25:57.576494 | 2019-05-09T15:33:00 | 2019-05-09T15:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Check header' cookies secure, e.g. httponly, secure and so on.
'''
from re import findall
from re import search
from re import I
def info(data=None):
    """Return this plugin's descriptor metadata (the *data* argument is
    accepted for interface compatibility but unused)."""
    return {
        "name": "cookie",
        "info": "cookie",
        "level": "low",
        "type": "info",
    }
def _plus(data, info,key = "cookie"):
data['flag'] = 1
data['res'].append({"info": info, "key": key})
return data
def prove(data):
    """Probe the target's HTTP response headers for insecure cookie flags
    and missing security headers, appending findings to *data* via _plus.

    BUG FIXES: (1) a trailing comma made ``cookies`` a 1-tuple, so every
    regex call on it raised TypeError; (2) the findall() calls searched
    ``headers`` (a mapping) instead of the cookie string.  Both errors were
    silently swallowed by the bare except, so no cookie finding was ever
    reported.
    """
    data = init(data, 'web')
    if data['url']:
        try:
            headers = curl('get', data['url']).headers
            if 'cookies' in headers.keys():
                # NOTE(review): real HTTP responses use 'Set-Cookie'; confirm
                # the curl() wrapper actually exposes a 'cookies' key.
                cookies = headers['cookies']
                if not search(r'secure;', cookies, I):
                    data = _plus(data, 'Cookie without Secure flag set')
                if not search(r'httponly;', cookies, I):
                    data = _plus(data, 'Cookie without HttpOnly flag set')
                if search(r'domain\=\S*', cookies, I):
                    domain = findall(r'domain\=(.+?);', cookies, I)
                    if domain:
                        data = _plus(data, 'Session Cookie are valid only at Sub/Domain: %s' % domain[0])
                if search(r'path\=\S*', cookies, I):
                    path = findall(r'path\=(.+?);', cookies, I)
                    if path:
                        data = _plus(data, 'Session Cookie are valid only on that Path: %s' % path[0])
                if search(r'(.+?)\=\S*;', cookies, I):
                    cookie_sessions = findall(r'(.+?)\=\S*;', cookies, I)
                    for cs in cookie_sessions:
                        if cs not in ['domain', 'path', 'expires']:
                            data = _plus(data, 'Cookie Header contains multiple cookies')
                            break
            if 'x-xss-protection' not in headers.keys():
                data = _plus(data, 'X-XSS-Protection header missing', 'x-xss-protection')
            if 'x-frame-options' not in headers:
                data = _plus(data, 'Clickjacking: X-Frame-Options header missing', 'x-frame-options')
            if 'content-type' not in headers:
                data = _plus(data, 'Content-Type header missing', 'content-type')
            if 'strict-transport-security' not in headers:
                data = _plus(data, 'Strict-Transport-Security header missing', 'strict-transport-security')
            if 'x-content-type-options' not in headers:
                data = _plus(data, 'X-Content-Type-Options header missing', 'x-content-type-options')
        except Exception:
            # Best-effort probe: network/parse failures leave data unchanged
            # (narrowed from a bare ``except:`` so KeyboardInterrupt escapes).
            pass
    return data
# Ad-hoc manual test: run the module directly against www.baidu.com.
# FIX: the original last line carried corrupted trailing characters
# ("| [") that made the file unparseable; they have been removed.
if __name__ == '__main__':
    from script import init, curl
    print(prove({'target_host': 'www.baidu.com', 'target_port': 22, 'flag': -1, 'data': [], 'res': []}))
"[email protected]"
] | |
f53cbfea41a1a703382d2eccb5c4074a2795e9e5 | 1635e722e7ede72f4877671f36bbbc4199abae81 | /ecosoft-addons/advance_and_additional_discount/sale.py | 227a9eaf76f4874a626c54c5534aae8deb8f788e | [] | no_license | ecosoft-odoo/sqp | 7c09617048091ac6de4b25a33ad88127d36de452 | 7a7fc6b88087d98d536dd4ec39f9fb572918090e | refs/heads/master | 2023-08-08T00:07:48.405000 | 2023-08-04T15:47:43 | 2023-08-04T15:47:43 | 40,047,976 | 3 | 9 | null | 2023-08-02T08:38:53 | 2015-08-01T13:48:54 | Python | UTF-8 | Python | false | false | 10,548 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from common import AdditionalDiscountable
import types
class sale_order(AdditionalDiscountable, osv.osv):
    """Extends ``sale.order`` with additional-discount, advance/deposit
    and retention features (legacy OpenERP osv style)."""
    _inherit = 'sale.order'
    # Column names consumed by the AdditionalDiscountable mixin's generic
    # amount computations.
    _tax_column = 'tax_id'
    _line_column = 'order_line'
    def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
        # Functional field: percentage of the order's untaxed amount already
        # invoiced, skipping advance/deposit invoices and cancelled ones.
        res = {}
        for sale in self.browse(cursor, user, ids, context=context):
            if sale.invoiced:
                res[sale.id] = 100.0
                continue
            tot = 0.0
            for invoice in sale.invoice_ids:
                # NOTE(review): ('cancel') is a plain string, so this is a
                # substring test rather than tuple membership; it happens to
                # work for the standard states but ('cancel',) was likely meant.
                if invoice.state not in ('cancel'):
                    #if invoice.state not in ('draft', 'cancel'):
                    # Do not add amount, it this is a deposit/advance
                    #tot += not invoice.is_deposit and not invoice.is_advance and invoice.amount_net # kittiu: we use amount_net instead of amount_untaxed
                    # We change from amount_net back to amount_untaxed again, due to case #2059 (may need to double check)
                    tot += not invoice.is_deposit and not invoice.is_advance and invoice.amount_untaxed
            if tot:
                res[sale.id] = min(100.0, round(tot * 100.0 / (sale.amount_untaxed or 1.00))) # <-- changed back to untaxed
            else:
                res[sale.id] = 0.0
        return res
    def _num_invoice(self, cursor, user, ids, name, args, context=None):
        '''Count the non-cancelled invoices linked to each sale order.
        (The original docstring — "amount still to pay" — was copied from a
        payment module and did not describe this method.)'''
        if not ids:
            return {}
        # NOTE(review): values start at False (== 0) and are incremented, so
        # orders with no invoice remain False rather than 0.
        res = dict.fromkeys(ids, False)
        cursor.execute('SELECT rel.order_id ' \
                'FROM sale_order_invoice_rel AS rel, account_invoice AS inv ' + \
                'WHERE rel.invoice_id = inv.id AND inv.state <> \'cancel\' And rel.order_id in (%s)' % ','.join(str(x) for x in ids))
        invs = cursor.fetchall()
        for inv in invs:
            res[inv[0]] += 1
        return res
    def _amount_all(self, *args, **kwargs):
        # Delegate the amount computations to the mixin's generic helper,
        # passing this model class so it can resolve the column names above.
        return self._amount_all_generic(sale_order, *args, **kwargs)
    def _get_amount_retained(self, cr, uid, ids, field_names, arg, context=None):
        # Functional field: retained amount per order, read from the posted
        # move lines on the customer retention account, converted to the
        # order's pricelist currency.
        if context is None:
            context = {}
        res = {}
        currency_obj = self.pool.get('res.currency')
        sale_obj = self.pool.get('sale.order')
        # Account Retention
        prop = self.pool.get('ir.property').get(cr, uid, 'property_account_retention_customer', 'res.partner', context=context)
        prop_id = prop and prop.id or False
        account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, False, prop_id)
        if not account_id:
            # No retention account configured: nothing can be retained.
            for id in ids:
                res[id] = 0.0
        else:
            for id in ids:
                order = sale_obj.browse(cr, uid, id)
                # Sum debit-credit of valid move lines on the retention
                # account for moves belonging to this order's invoices.
                cr.execute("""select sum(l.debit-l.credit) as amount_debit
                            from account_move_line l
                            inner join
                            (select order_id, move_id from account_invoice inv
                            inner join sale_order_invoice_rel rel
                            on inv.id = rel.invoice_id and order_id = %s) inv
                            on inv.move_id = l.move_id
                            where state = 'valid'
                            and account_id = %s
                            group by order_id
                            """, (order.id, account_id))
                amount_debit = cr.rowcount and cr.fetchone()[0] or 0.0
                # Convert from company currency to the order's currency.
                amount = currency_obj.compute(cr, uid, order.company_id.currency_id.id, order.pricelist_id.currency_id.id, amount_debit)
                res[order.id] = amount
        return res
    _columns = {
        'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
        # Additional Discount Feature
        'add_disc': fields.float('Additional Discount(%)', digits_compute=dp.get_precision('Additional Discount'), readonly=True, states={'draft': [('readonly', False)]}),
        'add_disc_amt': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Additional Disc Amt',
                                        store=True, multi='sums', help="The additional discount on untaxed amount."),
        'amount_untaxed': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
                                          store=True, multi='sums', help="The amount without tax."),
        'amount_net': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Net Amount',
                                      store=True, multi='sums', help="The amount after additional discount."),
        'amount_tax': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Taxes',
                                      store=True, multi='sums', help="The tax amount."),
        'amount_total': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Total',
                                        store=True, multi='sums', help="The total amount."),
        # Advance Feature
        'num_invoice': fields.function(_num_invoice, string="Number invoices created", store=False),
        'advance_type': fields.selection([('advance', 'Advance on 1st Invoice'), ('deposit', 'Deposit on 1st Invoice')], 'Advance Type',
                                         required=False, help="Deposit: Deducted full amount on the next invoice. Advance: Deducted in percentage on all following invoices."),
        'advance_percentage': fields.float('Advance (%)', digits=(16, 6), required=False, readonly=True),
        'amount_deposit': fields.float('Deposit Amount', readonly=True, digits_compute=dp.get_precision('Account')),
        # Retention Feature
        'retention_percentage': fields.float('Retention (%)', digits=(16, 6), required=False, readonly=True),
        'amount_retained': fields.function(_get_amount_retained, string='Retained Amount', type='float', readonly=True, digits_compute=dp.get_precision('Account'))
        #'amount_retained': fields.float('Retained Amount',readonly=True, digits_compute=dp.get_precision('Account'))
    }
    _defaults = {
        'add_disc': 0.0,
    }
    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicating an order must not carry over its advance/deposit and
        # retention state — reset those fields on the copy.
        if not default:
            default = {}
        default.update({
            'advance_type': False,
            'amount_deposit': False,
            'advance_percentage': False,
            'retention_percentage': False,
        })
        return super(sale_order, self).copy(cr, uid, id, default, context=context)
    def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):
        """Add a discount in the invoice after creation, and recompute the total
        """
        order = self.browse(cr, uid, ids[0], context=context)
        inv_obj = self.pool.get('account.invoice')
        # create the invoice
        inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)
        # modify the invoice: propagate the additional discount and the
        # customer reference, then recompute the invoice totals.
        inv_obj.write(cr, uid, [inv_id], {'add_disc': order.add_disc or 0.0,
                                          'name': order.client_order_ref or ''},
                      context)
        inv_obj.button_compute(cr, uid, [inv_id])
        return inv_id
    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        # Strip lines that were generated for an advance or a deposit, so
        # they are not re-invoiced on the regular invoice.
        invoice_line_obj = self.pool.get('account.invoice.line')
        results = invoice_line_obj.read(cr, uid, lines, ['id', 'is_advance', 'is_deposit'])
        for result in results:
            if result['is_advance']: # If created for advance, remove it.
                lines.remove(result['id'])
            if result['is_deposit']: # If created for deposit, remove it.
                lines.remove(result['id'])
        res = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context=context)
        return res
    def _check_tax(self, cr, uid, ids, context=None):
        # For Advance or Deposit case, loop through each lines, check if tax different.
        if not isinstance(ids, types.ListType): # Make it a list
            ids = [ids]
        orders = self.browse(cr, uid, ids, context=context)
        for order in orders:
            if order.advance_type in ['advance', 'deposit']:
                i = 0
                tax_ids = []
                for line in order.order_line:
                    next_line_tax_id = [x.id for x in line.tax_id]
                    # All lines must share one tax set, otherwise the
                    # advance/deposit deduction cannot be computed.
                    if i > 0 and set(tax_ids) != set(next_line_tax_id):
                        raise osv.except_osv(
                            _('Advance/Deposit!'),
                            _('You cannot create lines with different taxes!'))
                    tax_ids = next_line_tax_id
                    i += 1
        return True
    def write(self, cr, uid, ids, vals, context=None):
        # Re-validate the tax homogeneity constraint after every write.
        if not isinstance(ids, list):
            ids = [ids]
        res = super(sale_order, self).write(cr, uid, ids, vals, context=context)
        self._check_tax(cr, uid, ids, context=context)
        return res
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
48c70164518cd8c13035be54a8fd4613798d6f4b | 574a23f57daec3d462967e30ff808779127dc839 | /herle_inventarios/inventarios/serializers.py | 42469033e71751845727f0149bc8dc594c449395 | [] | no_license | RedGranatum/herleBackEnd | 18d3aecf75eb0d349470747f3fca4dbfd1581e80 | 8f21a7f7d0c2d3fdf3ae52eab6b31cbea7d3da97 | refs/heads/master | 2023-01-07T07:32:17.725947 | 2020-12-15T00:23:38 | 2020-12-15T00:23:38 | 57,180,381 | 0 | 0 | null | 2022-12-26T20:22:35 | 2016-04-27T03:25:14 | Python | UTF-8 | Python | false | false | 618 | py | from rest_framework import serializers
from .models import Inventario
class InventarioSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the Inventario model's identification,
    physical coil data and the currency/markup factors used for pricing."""
    class Meta:
        model = Inventario
        # Explicit field whitelist (no "__all__") so API output stays stable
        # if new columns are added to the model.
        fields = ("id","compra_detalle","invoice_compra","material","calibre","ancho","largo",
		          "codigo_producto","num_rollo","peso_kg","peso_lb","transporte","pais",
		          "precio_libra","factor","precio_dolar","factor_impuesto","con_comercializadora",
		          "porc_comercializadora", "factor_kilos","valor_kilo_dolar","valor_tonelada_dolar","valor_kilo_pesos",
		          "valor_final_kilo_pesos","descripcion","comentarios","precio_tonelada_dolar")
| [
"[email protected]"
] | |
59ac6c1ca8c6b389889458634600394990b5dc69 | 1f62195fb1960c6bddb38343adbe41c0497e40bc | /torchrl/utils/gym_wrappers/atari_wrappers.py | 14d18b1a90860b41d033edaf180e5bd29d3c4bef | [
"MIT"
] | permissive | alyssonmazoni/torchrl | 800ec186893607adac14c38c39c1d36f3488d3d8 | 75e82f073b7234432b491a21e5083bc55e3e985a | refs/heads/master | 2020-03-16T06:12:43.733257 | 2018-05-05T02:37:50 | 2018-05-05T02:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,291 | py | '''
Copied from:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
'''
import numpy as np
import gym
from gym import spaces
import cv2
def atari_wrap(env, frame_skip=4, noop_max=30):
    """Apply the standard DeepMind-style Atari preprocessing stack.

    Wrapper order matters: episodic-life termination, random no-op resets,
    frame skipping with max-pooling, an automatic FIRE press on reset (only
    for games that list a FIRE action), 84x84 grayscale frames, and reward
    clipping to {-1, 0, +1}.
    """
    # The raw (unskipped) ROM is required because MaxAndSkipEnv performs
    # its own frame skipping.
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=noop_max)
    env = MaxAndSkipEnv(env, skip=frame_skip)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    env = ClipRewardEnv(env)
    return env
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, forces an exact no-op count (useful for deterministic
        # evaluation); otherwise the count is sampled on every reset.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def _reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            # If an episode ends during the no-ops, start over so that the
            # returned observation always comes from a live episode.
            if done:
                obs = self.env.reset(**kwargs)
        return obs
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        # The game must expose FIRE as action 1 and at least one more action.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def _reset(self, **kwargs):
        """Reset, then press FIRE (action 1) and action 2 to start the game."""
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        # NOTE(review): if step(2) ends the episode, the env is reset but the
        # pre-reset observation is still returned -- confirm this is intended.
        if done:
            self.env.reset(**kwargs)
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        # Life count observed on the previous step; 0 before the first reset.
        self.lives = 0
        # True only when the underlying env reported done (real game over).
        self.was_real_done = True
    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def _reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2, ) + env.observation_space.shape, dtype='uint8')
        self._skip = skip
    def _step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two raw frames are kept for the pixel-wise max.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
class ClipRewardEnv(gym.RewardWrapper):
    # Reward magnitude is discarded; only its sign reaches the agent.
    def _reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env):
        """Warp frames to 84x84 as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        # Channel-first observation space: (1, height, width).
        self.observation_space = spaces.Box(
            low=0, high=255, shape=(1, self.height, self.width))
    def _observation(self, frame):
        # RGB -> grayscale, then area-resample down to 84x84.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        # frame[None] adds the leading channel axis -> shape (1, 84, 84).
        return frame[None, :, :]
| [
"[email protected]"
] | |
b5935e769053443d0cf189014e51f82faab401ff | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02785/s130693103.py | ccbb8dc59483b523e40685ea0fb13bcd61a7bf65 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import heapq
# Remove the K largest values, then print the sum of what remains.
# Values are stored negated so the min-heap yields the largest first.
(N, K) = map(int, input().split())
h = [-int(x) for x in input().split()]
heapq.heapify(h)
# BUG FIX: the original guarded the popping with `if K <= N`, so for
# K > N nothing was removed and the full sum was printed instead of 0.
# min() removes everything in that case.
for _ in range(min(K, len(h))):
    heapq.heappop(h)
# Remaining entries are negated values, so negate the sum back.
print(-sum(h))
"[email protected]"
] | |
275a965beacc898d34762e7483c622411e29df6e | 008c0a630ffa5bc412571acef2b7462e22fce196 | /tests/test_console.py | 3ec9d656f2396736bf0840eec5723aff6aeb2a8d | [
"BSD-2-Clause",
"Python-2.0",
"BSD-2-Clause-Views"
] | permissive | Jacktwist/python-tcod | dea56c330f5c27d85e71a2c44074b0b2c4536675 | d271cc9892a6bdcd931f7a9984fffc754170b36f | refs/heads/master | 2020-04-28T12:57:01.653126 | 2019-03-11T01:39:52 | 2019-03-11T01:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,786 | py | import pickle
import numpy as np
from numpy import array
import pytest
import tcod
@pytest.mark.filterwarnings("ignore:Directly access a consoles")
def test_array_read_write():
    """Console ch/fg/bg numpy views reflect and drive the libtcod API.

    Note: the arrays are indexed [row, column] == [y, x], while the
    console_* functions take (x, y).
    """
    console = tcod.console.Console(width=12, height=10)
    FG = (255, 254, 253)
    BG = (1, 2, 3)
    CH = ord('&')
    tcod.console_put_char_ex(console, 0, 0, CH, FG, BG)
    assert console.ch[0, 0] == CH
    assert tuple(console.fg[0, 0]) == FG
    assert tuple(console.bg[0, 0]) == BG
    tcod.console_put_char_ex(console, 1, 2, CH, FG, BG)
    assert console.ch[2, 1] == CH
    assert tuple(console.fg[2, 1]) == FG
    assert tuple(console.bg[2, 1]) == BG
    console.clear()
    assert console.ch[1, 1] == ord(' ')
    assert tuple(console.fg[1, 1]) == (255, 255, 255)
    assert tuple(console.bg[1, 1]) == (0, 0, 0)
    # Writes through slices/views must also reach the console.
    ch_slice = console.ch[1, :]
    ch_slice[2] = CH
    console.fg[1, ::2] = FG
    console.bg[...] = BG
    assert tcod.console_get_char(console, 2, 1) == CH
    assert tuple(tcod.console_get_char_foreground(console, 2, 1)) == FG
    assert tuple(tcod.console_get_char_background(console, 2, 1)) == BG
@pytest.mark.filterwarnings("ignore:.")
def test_console_defaults():
    """default_* properties round-trip their assigned values."""
    console = tcod.console.Console(width=12, height=10)
    # A list assignment is normalized to a tuple on read-back.
    console.default_bg = [2, 3, 4]
    assert console.default_bg == (2, 3, 4)
    console.default_fg = (4, 5, 6)
    assert console.default_fg == (4, 5, 6)
    console.default_bg_blend = tcod.BKGND_ADD
    assert console.default_bg_blend == tcod.BKGND_ADD
    console.default_alignment = tcod.RIGHT
    assert console.default_alignment == tcod.RIGHT
@pytest.mark.filterwarnings("ignore:Parameter names have been moved around,")
@pytest.mark.filterwarnings("ignore:Pass the key color to Console.blit instea")
def test_console_methods():
    """Smoke-test the drawing/blitting API: the calls must not raise."""
    console = tcod.console.Console(width=12, height=10)
    console.put_char(0, 0, ord('@'))
    console.print_(0, 0, 'Test')
    console.print_rect(0, 0, 2, 8, 'a b c d e f')
    console.get_height_rect(0, 0, 2, 8, 'a b c d e f')
    console.rect(0, 0, 2, 2, True)
    console.hline(0, 1, 10)
    console.vline(1, 0, 10)
    console.print_frame(0, 0, 8, 8, 'Frame')
    # Blitting a console onto itself is explicitly exercised.
    console.blit(0, 0, 0, 0, console, 0, 0)
    console.blit(0, 0, 0, 0, console, 0, 0, key_color=(0, 0, 0))
    console.set_key_color((254, 0, 254))
def test_console_pickle():
    """A pickle round-trip must preserve the ch/fg/bg buffers."""
    console = tcod.console.Console(width=12, height=10)
    console.ch[...] = ord('.')
    console.fg[...] = (10, 20, 30)
    console.bg[...] = (1, 2, 3)
    console2 = pickle.loads(pickle.dumps(console))
    assert (console.ch == console2.ch).all()
    assert (console.fg == console2.fg).all()
    assert (console.bg == console2.bg).all()
def test_console_pickle_fortran():
    """Pickling must keep Fortran-order (column-major) array strides."""
    console = tcod.console.Console(2, 3, order='F')
    console2 = pickle.loads(pickle.dumps(console))
    assert console.ch.strides == console2.ch.strides
    assert console.fg.strides == console2.fg.strides
    assert console.bg.strides == console2.bg.strides
def test_console_repr():
    """repr() of a console must be eval()-able."""
    array  # Needed for eval.
    eval(repr(tcod.console.Console(10, 2)))
def test_console_str():
    """str() renders the console contents between angle-bracket borders."""
    console = tcod.console.Console(10, 2)
    console.print_(0, 0, "Test")
    assert str(console) == ("<Test |\n"
                            "| >")
def test_console_fortran_buffer():
    """Constructing from a caller-supplied Fortran-order buffer must work."""
    tcod.console.Console(
        width=1,
        height=2,
        order="F",
        buffer=np.zeros((1, 2), order="F", dtype=tcod.console.Console.DTYPE),
    )
def test_console_clear():
    """clear() must repaint fg/bg with the requested colors."""
    console = tcod.console.Console(1, 1)
    # Defaults before clearing: white foreground on black background.
    assert console.fg[0, 0].tolist() == [255, 255, 255]
    assert console.bg[0, 0].tolist() == [0, 0, 0]
    console.clear(fg=(7, 8, 9), bg=(10, 11, 12))
    assert console.fg[0, 0].tolist() == [7, 8, 9]
    assert console.bg[0, 0].tolist() == [10, 11, 12]
| [
"[email protected]"
] | |
be4111fbce90624d076a4b5716314151ab4cc46e | d4e38b8a1438c0509f3f160a2ceb9aa166ac3ed1 | /quizzes/quiz1/server.py | 1f8c6d93d9cce2d4febe28260c52823c3b82da31 | [] | no_license | yulu9206/cmpe273-spring2018 | e7ffa36818cb87596de351b36d5dc33ec387bf1b | 8e0eaa0ce951b718dae195753dca0d9fc07b5a97 | refs/heads/master | 2021-04-06T11:17:44.918483 | 2018-03-15T20:26:35 | 2018-03-15T20:26:35 | 125,421,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import time
import grpc
import ping_pb2
import ping_pb2_grpc
from concurrent import futures
class PingServer(ping_pb2_grpc.PingPongServicer):
    """gRPC servicer answering every ping request with the constant 'Pong'."""
    def ping(self, request, context):
        # The request payload is ignored; the reply is always the same.
        return ping_pb2.Response(data='Pong')
def run(host, port):
    """Start the insecure PingPong gRPC server and block until Ctrl-C."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    ping_pb2_grpc.add_PingPongServicer_to_server(PingServer(), server)
    server.add_insecure_port('%s:%d' % (host, port))
    server.start()
    _ONE_DAY_IN_SECONDS = 60 * 60 * 24
    try:
        # start() does not block, so park the main thread here; note the
        # banner is re-printed every time the day-long sleep expires.
        while True:
            print("Server started at...%d" % port)
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        # Immediate shutdown (grace period of 0 seconds) on Ctrl-C.
        server.stop(0)
if __name__ == '__main__':
    # Listen on every interface, port 3000.
    run('0.0.0.0', 3000)
| [
"[email protected]"
] | |
f787f4e893e031beb971965ab9fcd5ad7fea6217 | 15e5b2e39d8f1c08b34b36783cc5504638e3e434 | /TODO-PROJECT/todo-back/todos/migrations/0001_initial.py | 318ee532b77426a5c00753188598859bd9f21d93 | [] | no_license | ghdus4185/Vue_intro | 62e0ded5b4a23ef34869d8e0bb7b337a7e9c934f | b999f19a4140b2456614625f8063ff919b8cdfed | refs/heads/master | 2023-01-13T09:42:15.804784 | 2019-11-21T00:01:38 | 2019-11-21T00:01:38 | 219,454,656 | 0 | 0 | null | 2023-01-07T21:19:32 | 2019-11-04T08:41:36 | HTML | UTF-8 | Python | false | false | 3,393 | py | # Generated by Django 2.2.6 on 2019-11-18 06:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial schema (Django 2.2.6).

    Creates a custom `User` model mirroring the stock auth user fields
    (with the standard UserManager) and a `Todo` model owned by a user.
    Do not hand-edit; add new migrations instead.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Todo rows cascade-delete together with their owning user.
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('completed', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
8b07c0d19f6538fff2340e3c06e09aba7bac2636 | 0ca210752cd5b926201f3fb40ee4aadc6da4f537 | /code/test_hd22879.py | bdca611a496161a373724a5b3bd0c099afc339a0 | [
"MIT"
] | permissive | andycasey/precise-objective-differential-spectroscopy | f45f93dfdaeb78d138d7792c439cf7b6065882c5 | 658b0d226300330375570dd2450bb284f1cf167a | refs/heads/master | 2021-01-19T20:24:48.539975 | 2014-07-09T13:41:32 | 2014-07-09T13:41:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | import cPickle as pickle
from stellar_parameters import Star
from channel import SpectralChannel
class spectrum(object):
pass
import sick
spec = sick.specutils.Spectrum.load("spectra/hermes-sun.fits")
spec = sick.specutils.Spectrum.load("spectra/uvessun1.txt")
blue_channel = spectrum()
blue_channel.dispersion = spec.disp
blue_channel.flux = spec.flux
blue_channel.variance = spec.variance
with open("transitions.pkl", "rb") as fp:
transitions = pickle.load(fp)
with open("sousa-transitions.pkl", "rb") as fp:
transitions = pickle.load(fp)
# Get just blue channel ones
transition_indices = (blue_channel.dispersion[-1] > transitions["rest_wavelength"]) * (transitions["rest_wavelength"] > blue_channel.dispersion[0])
use_regions = np.array([
[4731.3, 4731.65],
[4742.65, 4742.93],
[4757.95, 4748.31],
[4759.1, 4759.56],
[4764.43, 4764.47],
[4778.08, 4778.41],
[4779.78, 4780.2],
[4781.59, 4781.92],
[4788.41, 4789],
[4789.91, 4790.19],
[4795.24, 4795.66],
[4798.39, 4798.64],
[4802.69, 4803.2],
[4805.3, 4805.71],
[4807.95, 4808.35],
[4820.23, 4820.6],
[4847.89, 4848.02],
[4869.85, 4870.3],
[4873.88, 4874.19],
[4884.95, 4885.25],
[4889.9, 4892.67],
[4894.7, 4895.0]
])
#use_regions = np.array([
# [4705, 4850.],
# [4880., 5000.]
#])
mask = np.empty(len(blue_channel.dispersion))
mask[:] = np.nan
for row in use_regions:
indices = blue_channel.dispersion.searchsorted(row)
mask[indices[0]:indices[1] + 1] = 1.
print(np.sum(np.isfinite(mask)))
blue = SpectralChannel(blue_channel, transitions[transition_indices], mask=mask, redshift=False, continuum_order=-1,
wl_tolerance=0.10, wl_cont=2, outliers=True)
xopt = blue.optimise(plot_filename="blue_optimise.pdf", plot_clobber=True)
star = Star("/Users/arc/atmospheres/castelli-kurucz-2004/a???at*.dat", channels=[blue])
star.infer({"Teff": 5700., "logg": 4.0, "[M/H]": 0.1, "xi": 0.9}, walkers=200, burn=450, sample=50)
| [
"[email protected]"
] | |
a443bc1ff37c65ac9a675e1b051f9c343c668fe0 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnprimev.py | b38df65ef76b7a4749b5371f10fce62d9dcee3fb | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 727 | py | ii = [('BentJDO2.py', 1), ('LyelCPG2.py', 1), ('RogePAV2.py', 2), ('GodwWSL2.py', 1), ('RogePAV.py', 4), ('SadlMLP.py', 2), ('WilkJMC3.py', 1), ('PettTHE.py', 1), ('PeckJNG.py', 1), ('ChalTPW2.py', 2), ('AdamWEP.py', 1), ('CarlTFR.py', 3), ('RoscTTI3.py', 1), ('BailJD1.py', 1), ('BuckWGM.py', 17), ('LyelCPG.py', 11), ('DibdTRL2.py', 1), ('MedwTAI.py', 1), ('LandWPA2.py', 1), ('FerrSDO2.py', 1), ('KirbWPW2.py', 1), ('MedwTAI2.py', 1), ('WilkJMC.py', 4), ('HogaGMM.py', 1), ('FitzRNS4.py', 3), ('RoscTTI.py', 1), ('MackCNH2.py', 2), ('WilbRLW3.py', 1), ('JacoWHI.py', 1), ('FitzRNS2.py', 1), ('NortSTC.py', 2), ('SadlMLP2.py', 1), ('TaylIF.py', 2), ('WordWYR.py', 1), ('ChalTPW.py', 2), ('KirbWPW.py', 11), ('BentJDO.py', 1)] | [
"[email protected]"
] | |
7bfb340afa9df5a5df6c0f09a08cba7269997bbb | ecd0cffe45c6fee6ce02b70fb5721caac66a7b37 | /Data_Structures/Graphs/DijkstraAlgorithm.py | d16d8b75f887ff9deefb3181f6df5ca25839d837 | [] | no_license | johngaitho05/Interview-Questions | 75925ba5e3326857a13cfe6e1add27be3d1aa83e | 979f6960bc44186208c629403fa4ed73f72673b0 | refs/heads/master | 2021-05-21T13:39:48.290629 | 2020-06-18T05:51:30 | 2020-06-18T05:51:30 | 252,669,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,447 | py | import sys
import heapq
def cmp(x, y):
    """Three-way comparison (drop-in for Python 2's removed ``cmp``).

    Returns a negative integer when ``x < y``, ``0`` when the operands
    are equal, and a positive integer when ``x > y``.
    """
    if x == y:
        return 0
    return 1 if x > y else -1
class Edge:
    """Directed, weighted connection from ``startVertex`` to ``targetVertex``."""
    def __init__(self, weight, startVertex, targetVertex):
        self.weight, self.startVertex, self.targetVertex = (
            weight, startVertex, targetVertex)
class Node:
    """Graph vertex with Dijkstra bookkeeping state.

    Ordering compares ``minDistance`` so instances can live in a heap.
    """
    def __init__(self, name):
        self.name = name
        self.visited = False        # finalization flag
        self.predecessor = None     # previous node on the best known path
        self.adjacenciesList = []   # outgoing Edge objects
        self.minDistance = sys.maxsize
    def __cmp__(self, otherVertex):
        # Python 2 ordering hook; delegates to the module-level cmp().
        return cmp(self.minDistance, otherVertex.minDistance)
    def __lt__(self, other):
        # Python 3 ordering hook used by heapq.
        return self.minDistance < other.minDistance
class DijkstraAlgorithm:
    """Lazy-deletion Dijkstra over Node/Edge objects.

    Mutates the nodes in place: after a run, ``minDistance`` holds the
    shortest distance from the start vertex and the ``predecessor`` links
    trace the path back.  Assumes nonnegative edge weights.
    """
    def calculateShortestPath(self, startVertex):
        """Relax edges outward from *startVertex* until the heap drains."""
        q = []
        startVertex.minDistance = 0
        heapq.heappush(q, startVertex)
        while q:
            actualVertex = heapq.heappop(q)
            # A node can be pushed several times with successively better
            # distances; entries popped after the node was already
            # finalized are stale.  The original reprocessed them (the
            # `visited` flag existed on Node but was never used) -- skip
            # them here.  Final distances are unchanged.
            if actualVertex.visited:
                continue
            actualVertex.visited = True
            for edge in actualVertex.adjacenciesList:
                u = edge.startVertex
                v = edge.targetVertex
                newDistance = u.minDistance + edge.weight
                if newDistance < v.minDistance:
                    v.predecessor = u
                    v.minDistance = newDistance
                    heapq.heappush(q, v)
    def getShortestPathTo(self, targetVertex):
        """Print the best distance, then the path from target back to start."""
        print("Shortest path to vertex is: ", targetVertex.minDistance)
        node = targetVertex
        while node is not None:
            print(node.name)
            node = node.predecessor
if __name__ == '__main__':
    # Demo graph with eight vertices labeled A..H.
    nodes = {label: Node(label) for label in "ABCDEFGH"}

    # (weight, source, target) triples, listed in the exact order of the
    # original edge1..edge16 appends so heap tie-breaking -- and therefore
    # the printed path -- is unchanged.
    edge_specs = [
        (5, "A", "B"), (8, "A", "H"), (9, "A", "E"),
        (15, "B", "D"), (12, "B", "C"), (4, "B", "H"),
        (7, "H", "C"), (6, "H", "F"),
        (5, "E", "H"), (4, "E", "F"), (20, "E", "G"),
        (1, "F", "C"), (13, "F", "G"),
        (3, "C", "D"), (11, "C", "G"),
        (9, "D", "G"),
    ]
    for weight, src, dst in edge_specs:
        edge = Edge(weight, nodes[src], nodes[dst])
        nodes[src].adjacenciesList.append(edge)

    vertexList1 = tuple(nodes[label] for label in "ABCDEFGH")

    algorithm = DijkstraAlgorithm()
    algorithm.calculateShortestPath(nodes["A"])
    algorithm.getShortestPathTo(nodes["G"])
"[email protected]"
] | |
163ef3f305c3772d2d7644c28856b2fc13b47f3b | 55a2e62805cca90f46f3ac9c9501aa3386ab3109 | /games/urls.py | f8a56294778ab42f63bedfc70f5c669ad53da1b9 | [] | no_license | profmcdan/games-service | 2bf5d533b797d46e6a27ae63eac9c367cf144497 | 24049d08cfc645bd750839bcd0eba16fa7d7ee2d | refs/heads/master | 2022-12-16T01:19:09.859633 | 2019-08-06T15:11:54 | 2019-08-06T15:11:54 | 200,827,749 | 0 | 0 | null | 2022-12-08T01:48:16 | 2019-08-06T10:14:54 | Python | UTF-8 | Python | false | false | 984 | py | # from django.conf.urls import url
from django.urls import path
# from .views import game_collection, game_detail
from . import views
# URL routes for the games API.  Every view's `name` attribute is reused
# as the route name so reverse() lookups stay in sync with the views.
urlpatterns = [
    path('esrb-ratings/', views.EsrbRatingList.as_view(),
         name=views.EsrbRatingList.name),
    path('esrb-ratings/<int:pk>/', views.EsrbRatingDetail.as_view(),
         name=views.EsrbRatingDetail.name),
    # BUG FIX: GameList was routed without .as_view(); a class-based view
    # must be converted to a view callable like every other route here.
    path('games/', views.GameList.as_view(), name=views.GameList.name),
    path('games/<int:pk>/', views.GameDetail.as_view(),
         name=views.GameDetail.name),
    path('players/', views.PlayerList.as_view(), name=views.PlayerList.name),
    path('players/<int:pk>/', views.PlayerDetail.as_view(),
         name=views.PlayerDetail.name),
    path('player-scores/', views.PlayerScoreList.as_view(),
         name=views.PlayerScoreList.name),
    path('player-scores/<int:pk>/', views.PlayerScoreDetail.as_view(),
         name=views.PlayerScoreDetail.name),
    path('', views.ApiRoot.as_view(), name=views.ApiRoot.name),
]
| [
"[email protected]"
] | |
f5b439565e4463e6269798259de653eacdfd482e | 03195a6f98396fd27aedc3c06d81f1553fb1d16b | /pandas/_libs/properties.pyi | b2ba55aefb8a57e9a884c9c07d3882b1e3014f78 | [
"BSD-3-Clause"
] | permissive | huaxz1986/pandas | a08d80d27726fe141d449835b9a09265bca5b5e0 | ba2473834fedcf571d3f8245b4b24796873f2736 | refs/heads/master | 2023-06-11T02:20:14.544220 | 2022-01-12T04:40:06 | 2022-01-12T04:40:06 | 131,370,494 | 3 | 4 | BSD-3-Clause | 2018-04-28T03:51:05 | 2018-04-28T03:51:05 | null | UTF-8 | Python | false | false | 330 | pyi | # pyright: reportIncompleteStub = false
from typing import Any
# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
# Declared as `property` purely so type checkers special-case it; the real
# runtime object (see note above) provides fget-style access only.
cache_readonly = property
def __getattr__(name: str) -> Any: ...  # incomplete
| [
"[email protected]"
] | |
3340ab19cd6e27d561259c17962122f7ca5afbb5 | baed2c2da1f776c0968d3cacd2fa45bdbe5482d6 | /ZOS_API_scripts/LAT_analysis/focal_plane_strehl_ratios_CD.py | 4531fc83e94b0eca7735eed469b8422d1bd1be10 | [] | no_license | patogallardo/zemax_tools | 5ae2fe9a1e8b032684b8cf57457ee4f3239d9141 | 90d309c2f96c94469963eb905844d76fa2137bf9 | refs/heads/master | 2023-01-08T22:52:16.865852 | 2022-12-20T21:36:28 | 2022-12-20T21:36:28 | 234,634,525 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,881 | py | import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
import scipy.interpolate as interp
from scipy import stats
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
assert len(sys.argv) == 2  # usage: script.py <wavelength-key>
# (the original repeated this assert a second time below; one is enough)

plt.rcParams.update({'font.size': 14})

# System metadata: field center (deg) used to re-center all field angles.
s = pd.read_hdf('ray_db.hdf', 'system_variables')
center_field_deg = [s.center_field_x, s.center_field_y]

overlay_circle = True
rs = [2200/2]  # radii for circles overlay

strehl_map_fname = "strehl_map_wl_%s.hdf" % sys.argv[1]
print(strehl_map_fname)
def get_field_positions_and_strehl_map_fname():
    '''find 2 databases in current folder.
    field positions and strehls.
    if there are more than one file per flavor of database it will
    raise an error.'''
    # glob on the literal names; each pattern matches at most one file, and
    # the asserts below require exactly one of each.
    field_position_fnames = glob.glob('ray_db.hdf')
    strehl_fnames = glob.glob(strehl_map_fname)
    field_position_fnames.sort()
    strehl_fnames.sort()
    print(field_position_fnames)
    print(strehl_fnames)
    assert len(field_position_fnames) == len(strehl_fnames)
    assert len(field_position_fnames) == 1
    assert len(strehl_fnames) == 1
    pos_fname, strehl_fname = field_position_fnames[0], strehl_fnames[0]
    print('Analyzing the following files:')
    print("Focal plane positions: ", pos_fname)
    print("Strehl maps: ", strehl_fname)
    # Project name is read from the system-variables table for plot labels.
    s = pd.read_hdf('ray_db.hdf', 'system_variables')
    projectName = s.project_name
    print('project name: %s' % projectName)
    return pos_fname, strehl_fname, projectName
pos_fname, strehl_fname, projectName = get_field_positions_and_strehl_map_fname() # noqa
def interpolate_vignetting_for_strehls(xx, yy, vig):
    """Build a linear interpolator of the vignetting flag over the field grid.

    ``xx``/``yy`` are flattened square-grid field angles and ``vig`` their
    boolean vignetting flags.  Returns a RegularGridInterpolator mapping
    (x, y) points to a float in [0, 1]; out-of-grid queries are not an
    error (``bounds_error=False``).
    """
    side = int(np.sqrt(len(xx)))
    axis_x = np.reshape(xx, (side, side))[0, :]   # x axis = first grid row
    axis_y = np.reshape(yy, (side, side))[:, 0]   # y axis = first grid column
    flags = 1.0 * vig.reshape([side, side])       # bool -> float
    return interp.RegularGridInterpolator(
        points=(axis_x, axis_y),
        values=flags.swapaxes(0, 1),
        method='linear',
        bounds_error=False)
class open_databases:
    '''object containing raytrace dataframe split by marginal rays
    '''
    def __init__(self):
        projectInfo = get_field_positions_and_strehl_map_fname()
        # (second element unused here -- the module-level strehl_fname
        # global is what the pd.read_hdf calls below actually use)
        self.pos_fname, strehl_fnem, projectName = projectInfo
        df_rays = pd.read_hdf(self.pos_fname, key='df')
        # Re-center field angles on the configured field center (deg).
        df_rays['hx_deg'] = df_rays['hx_deg'] - center_field_deg[0]
        df_rays['hy_deg'] = df_rays['hy_deg'] - center_field_deg[1]
        # Chief ray (px=py=0) and the four marginal-ray pupil extremes.
        df_pos = df_rays.query('px == 0 and py == 0', inplace=False)
        df_xp = df_rays.query('px==1 and py==0')
        df_yp = df_rays.query('px==0 and py==1')
        df_xm = df_rays.query('px==-1 and py==0')
        df_ym = df_rays.query('px==0 and py==-1')
        # A field is vignetted if ANY of the four marginal rays is.
        vig1 = df_xp.vignette_code.values != 0
        vig2 = df_yp.vignette_code.values != 0
        vig3 = df_xm.vignette_code.values != 0
        vig4 = df_ym.vignette_code.values != 0
        vig_p = np.logical_or(vig1, vig2)
        vig_m = np.logical_or(vig3, vig4)
        vig = np.logical_or(vig_p, vig_m)
        self.vig = vig
        # Writes through .values mutate the query() copy in place,
        # bypassing pandas' chained-assignment checks -- intentional here.
        df_pos.x_pos.values[vig] = np.nan
        df_pos.y_pos.values[vig] = np.nan
        u = interpolate_vignetting_for_strehls(df_pos.hx_deg.values,
                                               df_pos.hy_deg.values,
                                               vig)
        df_strh = pd.read_hdf(strehl_fname, key='df')
        # Wavelength stored in um; converted to mm for labels/filenames.
        wl = pd.read_hdf(strehl_fname, key='wavelength').wavelength_um/1e3
        df_strh['vignetted'] = u((df_strh.xx_deg.values,
                                  df_strh.yy_deg.values))
        self.df_pos = df_pos
        self.df_xp = df_xp
        self.df_yp = df_yp
        self.df_xm = df_xm
        self.df_ym = df_ym
        self.df_strh = df_strh
        self.wavelength = wl
db = open_databases()
def interpolate_grid(df_pos):
    """Return (u, v) interpolators mapping field angles (deg) to the
    focal-plane x and y positions sampled on the chief-ray grid."""
    side = int(np.sqrt(len(df_pos)))  # the field sampling must be a square grid
    axis_x = df_pos.hx_deg.values.reshape(side, side)[0, :]
    axis_y = df_pos.hy_deg.values.reshape(side, side)[:, 0]

    def _make(column):
        # One interpolator per focal-plane coordinate.
        grid = column.values.reshape(side, side).swapaxes(0, 1)
        return interp.RegularGridInterpolator((axis_x, axis_y), grid,
                                              bounds_error=False)

    return _make(df_pos.x_pos), _make(df_pos.y_pos)
def plotArea_focal_plane(x_mm, y_mm, z_strehl,
                         thresholds=[0.95],
                         overlay_circle=False,
                         rs=[2000, 3000]):
    """Hexbin/contour map of Strehl ratio over the focal plane [mm].

    Prints and annotates the focal-plane area above each Strehl threshold
    and saves PNG/PDF figures under ./strehls/.  (Mutable defaults are
    never mutated here, so they are harmless.)
    """
    # Drop fields whose chief ray was vignetted (NaN positions).
    sel = np.logical_not(np.isnan(x_mm))
    x, y, z = x_mm[sel], y_mm[sel], z_strehl[sel]
    # Mean Strehl on a 100x100 grid covering +/-2000 mm.
    res = stats.binned_statistic_2d(x, y, z, statistic='mean',
                                    range=[[-2000, 2000], [-2000, 2000]],
                                    bins=[100, 100])
    x_bin = 0.5*(res.x_edge[:-1] + res.x_edge[1:])
    y_bin = 0.5*(res.y_edge[:-1] + res.y_edge[1:])
    x_increment, y_increment = np.diff(res.x_edge)[0], np.diff(res.y_edge)[0]
    pixel_area = x_increment * y_increment
    # Area = (number of bins above threshold) * bin area, in mm^2.
    above_thresholds = [res.statistic > threshold
                        for threshold in thresholds]
    areas = [np.sum(above_threshold) * pixel_area
             for above_threshold in above_thresholds]
    for j in range(len(thresholds)):
        print('Area above Strehl %1.2f: %3.1f [m^2]' % (thresholds[j],
                                                        areas[j]/1e6))
    # now make the plot
    fig, ax = plt.subplots(figsize=[6, 5])
    hb = ax.hexbin(x_mm, y_mm, z_strehl, vmin=0.5, vmax=1.0)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.05)
    cbar = plt.colorbar(hb, cax=cax,
                        ticks=np.array([0.5, 0.6, 0.7, 0.8, 0.9, 1.0]))
    cbar.set_label('Strehl ratio [-]')
    contours_ = [0.5, 0.7, 0.8, 0.9, 0.95]
    cs = ax.contour(x_bin, y_bin, res.statistic.T, contours_,
                    cmap='inferno')
    ax.clabel(cs, inline=1, fontsize=15, fmt='%1.2f')
    if overlay_circle:
        # Reference circles of radius r [mm] with their areas in m^2.
        theta = np.linspace(0, 2*np.pi, 1000)
        for j, r in enumerate(rs):
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            circle_area = np.pi * r**2/1e6  # in m^2
            ax.plot(x, y,
                    label='$r_{\\bigcirc}$= %1.2f m\nA=%1.2f$m^2$' % (r/1000, circle_area), # noqa
                    color='C%i' %(j+1)) # noqa
        ax.legend(loc='lower left', fontsize=8)
    ax.set_aspect('equal')
    ax.set_xlabel('$x_{\\rm{focal~plane}}$ [mm]')
    ax.set_ylabel('$y_{\\rm{focal~plane}}$ [mm]')
    x_min, x_max = np.min(x_mm[sel])*1.05, np.max(x_mm[sel])*1.05
    y_min, y_max = np.min(y_mm[sel])*1.05, np.max(y_mm[sel])*1.05
    ax.set_xlim([x_min, x_max])
    ax.set_ylim([y_min, y_max])
    # NOTE(review): the title hard-codes lambda = 1 mm, but the filenames
    # below use db.wavelength -- confirm the title should be dynamic too.
    ax.set_title('Focal plane Strehl ratio at $\\lambda=1mm$')
    # plt.colorbar()
    ax.grid(alpha=0.3)
    # bubble
    texts = ['Area$_{Strehl > %1.2f}$: %1.1fm$^2$' % (thresholds[j], areas[j]/1e6) # noqa
             for j in range(len(thresholds))]
    textstr = '\n'.join(texts)
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    plt.figtext(0.63, 0.84, textstr, bbox=props, fontsize=8, alpha=1.0)
    plt.figtext(0.9, 0.05, projectName, fontsize=5, ha='right')
    if not os.path.exists('./strehls'):
        os.mkdir('./strehls')
    fig.tight_layout()
    plt.savefig('./strehls/focal_plane_strehls_wl_%i_mm.png' % db.wavelength,
                dpi=150)
    plt.savefig('./strehls/focal_plane_strehls_wl_%i_mm.pdf' % db.wavelength)
    plt.close()
def plot_img_qual_sky(db, thresholds=[0.95, 0.90, 0.80]):
    """Hexbin/contour map of Strehl ratio in sky coordinates [deg].

    Uses only unvignetted fields, prints/annotates the solid angle above
    each Strehl threshold, and saves PNG/PDF under ./strehls/.
    """
    df_strh = db.df_strh
    # Keep only fields whose interpolated vignetting flag is exactly 0.
    sel = df_strh.vignetted == 0
    x, y = df_strh.xx_deg.values[sel], df_strh.yy_deg.values[sel]
    z = df_strh.z_strehl.values[sel]
    # Mean Strehl on a 100x100 grid covering +/-7 deg.
    res = stats.binned_statistic_2d(x, y, z, statistic='mean',
                                    range=[[-7, 7], [-7, 7]],
                                    bins=[100, 100])
    # compute area over thresholds
    x_bin = 0.5*(res.x_edge[:-1] + res.x_edge[1:])
    y_bin = 0.5*(res.y_edge[:-1] + res.y_edge[1:])
    x_increment, y_increment = np.diff(res.x_edge)[0], np.diff(res.y_edge)[0]
    pixel_area = x_increment * y_increment #
    above_thresholds = [res.statistic > threshold
                        for threshold in thresholds]
    areas = [np.sum(above_threshold) * pixel_area
             for above_threshold in above_thresholds]
    for j in range(len(thresholds)):
        print('Area above Strehl %1.2f: %3.1f [deg^2]' % (thresholds[j],
                                                          areas[j]))
    # now make the plot
    fig, ax = plt.subplots(figsize=[6, 5])
    hb = ax.hexbin(x, y, z, vmin=0.0, vmax=1.0,
                   cmap='viridis')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.05)
    cbar = plt.colorbar(hb, cax=cax,
                        ticks=np.arange(0, 1.1, 0.2))
    cbar.set_label('Strehl ratio [-]')
    cs = ax.contour(x_bin, y_bin, res.statistic.T,
                    np.array([0.5, 0.7, 0.8, 0.9, 0.95]),
                    colors='white')
    # cmap='viridis')
    ax.clabel(cs, inline=1, fontsize=15)
    ax.set_xlabel('$x_{sky}$ [deg]')
    ax.set_ylabel('$y_{sky}$ [deg]')
    xmax = 5.0
    ax.set_xlim([-xmax, xmax])
    ax.set_ylim([-xmax, xmax])
    ax.set_title('CD Strehl ratio at $\\lambda=%1.1f mm$' % db.wavelength)
    ax.grid(alpha=0.3)
    # bubble
    texts = ['$\\Omega_{Strehl > %1.2f}$: %1.1f deg$^2$' % (thresholds[j],
                                                            round(areas[j], 1))
             for j in range(len(thresholds))]
    textstr = '\n'.join(texts)
    props = dict(boxstyle='round', facecolor='white', alpha=0.7)
    plt.figtext(0.60, 0.175, textstr, bbox=props, fontsize=8)
    # plt.figtext(0.9, 0.05, projectName, fontsize=5, ha='right')
    if not os.path.exists('./strehls'):
        os.mkdir('./strehls')
    fig.tight_layout()
    plt.savefig('./strehls/sky_strehls_wl_%i_mm.png' % db.wavelength, dpi=150)
    plt.savefig('./strehls/sky_strehls_wl_%i_mm.pdf' % db.wavelength)
    plt.close()
# Map every Strehl-sampled field angle to its focal-plane position, then
# render both the focal-plane and on-sky Strehl maps.
u, v = interpolate_grid(db.df_pos)
x_str_deg, y_str_deg = db.df_strh.xx_deg.values, db.df_strh.yy_deg.values
positions_to_eval = np.hstack([x_str_deg[:, np.newaxis],
                               y_str_deg[:, np.newaxis]])
x_mm = u(positions_to_eval)
y_mm = v(positions_to_eval)
plotArea_focal_plane(x_mm, y_mm, db.df_strh.z_strehl.values,
                     overlay_circle=overlay_circle,
                     rs=rs)
plot_img_qual_sky(db)
| [
"[email protected]"
] | |
f9106238b4ff20bec1e7b0835e8bd33b0db2acf4 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/uberdog/DistributedDataStoreManagerUD.py | a96c02427ed6514633f660973bad639449564c3f | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedDataStoreManagerUD(DistributedObjectUD):
    """Server-side ('UD') distributed-object stub for the data store manager.

    Every handler below is an unimplemented placeholder: the generated
    message fields (`todo0`, `todo1`) are accepted and ignored.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDataStoreManagerUD')

    def startStore(self, todo0):
        """Placeholder handler (no-op)."""
        pass

    def stopStore(self, todo0):
        """Placeholder handler (no-op)."""
        pass

    def queryStore(self, todo0, todo1):
        """Placeholder handler (no-op)."""
        pass

    def receiveResults(self, todo0, todo1):
        """Placeholder handler (no-op)."""
        pass

    def deleteBackupStores(self):
        """Placeholder handler (no-op)."""
        pass
"[email protected]"
] | |
73a145d26a657841c35d7ea4b5ab7b210955a4ee | b3879bc761ac38dab903da57c4061ad79fd70c6d | /курсы пайтон модуль 8/задание 9.py | 6957678c459752ce7e24854b4f53dc592a0069d7 | [] | no_license | Ruslan5252/all-of-my-projects-byPyCharm | 4df70cc3a31c4a5d97560fa858a706edcc856299 | 817d5f711408590ea141590ae52c6d888dfa2015 | refs/heads/master | 2023-05-03T01:06:30.156731 | 2021-05-29T13:51:16 | 2021-05-29T13:51:16 | 371,970,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | class Car():
def __init__(self, name, model, maxSpeed, year, volume):
    """Store the car's basic attributes on the instance."""
    # Single tuple assignment instead of five separate statements.
    (self.name, self.model, self.maxSpeed,
     self.year, self.volume) = (name, model, maxSpeed, year, volume)
def ride(self):
    """Print a one-line (Russian) description of this car plus 'is riding'.

    NOTE(review): the f-strings contain no placeholders and each attribute
    is wrapped in a one-element set literal, so values print as {'value'};
    the call is reproduced unchanged to keep the output identical.
    """
    print(f"машина названием ", {self.name}, "моделью ", {self.model},
          "с максимальной скоростью ", {self.maxSpeed},
          "с годом выпуска ", {self.year}, "с объемом", {self.volume},
          'is riding')
class Toyota(Car):
    """Car subclass that additionally stores the manufacturing country.

    NOTE(review): the attribute is spelled `manufcturer` in the original;
    the typo is preserved so existing readers of the attribute keep working.
    """

    def __init__(self, name, model, maxSpeed, year, volume, manufacturer):
        """Store all attributes (note the original `manufcturer` spelling)."""
        (self.name, self.model, self.maxSpeed, self.year,
         self.volume, self.manufcturer) = (name, model, maxSpeed,
                                           year, volume, manufacturer)

    def ride(self):
        """Print the Russian description, including the manufacturer."""
        print(f"машина с названием ", {self.name}, "моделью ", {self.model},
              "с максимальной скоростью ", {self.maxSpeed},
              "с годом выпуска ", {self.year}, "с объемом", {self.volume},
              'произведенная в ', {self.manufcturer}, 'is riding')
class Mercedes(Car):
    """Car subclass that additionally stores a vehicle class ('класс')."""

    def __init__(self, name, model, maxSpeed, year, volume, class_Type):
        """Store all attributes, including the extra `class_Type`."""
        (self.name, self.model, self.maxSpeed, self.year,
         self.volume, self.class_Type) = (name, model, maxSpeed,
                                          year, volume, class_Type)

    def ride(self):
        """Print the Russian description, including the vehicle class."""
        print(f"машина с названием ", {self.name}, "моделью ", {self.model},
              "с максимальной скоростью ", {self.maxSpeed},
              "с годом выпуска ", {self.year}, "с объемом", {self.volume},
              'имеющая ', {self.class_Type}, 'класс is riding')
# NOTE(review): this "class" is really an interactive script -- the whole
# body below executes once, at class-definition time, prompting on stdin.
class Main():
# Collected vehicles (class attribute shared by the loop below).
b=[]
# Three rounds of prompts; each round builds one Car, one Toyota and one
# Mercedes, i.e. nine objects in total.
i=3
while i!=0:
c=Car(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
input("введите год выпуска машины "),
input("Введите объем машины "))
p = Toyota(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
input("введите год выпуска машины "),
input("Введите объем машины "), input("Введите страну производства "))
a = Mercedes(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
input("введите год выпуска машины "),
input("Введите объем машины "), input("введите класс машины "))
b.append(p)
b.append(a)
b.append(c)
i-=1
# Print the description of every collected vehicle.
for i in b:
i.ride()
"[email protected]"
] | |
f73c69327f9a0808a5342429f537d9f3327594c9 | cec7315a6e86aece2b1fbc5c471aafca3288bfc2 | /backend/manage.py | d8e871a95646132dff05fabdcd33238d8bf93a36 | [] | no_license | crowdbotics-apps/broken-art-29088 | c783b82a52483e5bec52893bdae01c1849095e44 | b240934504f1aba03821ff07b336298fb98e6ca6 | refs/heads/master | 2023-06-25T17:31:47.595217 | 2021-07-23T21:38:46 | 2021-07-23T21:38:46 | 388,932,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: run Django's command-line utility for this project."""
    # Point Django at this project's settings unless the caller overrode it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'broken_art_29088.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original as the cause.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
8fca49fbe0da62e740f590084e3aea24dc479f4e | fcd29745ed7a66b46f5039c2ad07f2fa5cb457a2 | /6_files/files_project/app.py | 758c311e0fb3ecf125e133b27a725f2b0261621f | [
"MIT"
] | permissive | PacktPublishing/The-Complete-Python-Course | 17489ec6939b5c6c20b92d5bb2d15a71a6444f8e | b880ef9c0e281316f4080531d3690435a686e9c0 | refs/heads/master | 2023-02-19T04:22:33.451524 | 2023-02-02T06:04:40 | 2023-02-02T06:04:40 | 204,631,924 | 56 | 72 | MIT | 2023-02-15T23:21:39 | 2019-08-27T06:09:32 | Python | UTF-8 | Python | false | false | 239 | py | my_file = open('data.txt', 'r')
file_content = my_file.read()
my_file.close()
print(file_content)

user_name = input('Enter your name: ')
# Context manager: the file is flushed and closed even if write() raises,
# instead of relying on an explicit close() that an exception could skip.
with open('data.txt', 'w') as my_file_writing:
    my_file_writing.write(user_name)
| [
"[email protected]"
] | |
2bc2d9a96d32d718cd7212819c018fb6c1d25567 | 5cc4a73d6fb144d72e74b07a10b60fc36bfe50ec | /shops/migrations/0002_auto_20190330_1916.py | df99c08c2f3e516d78ab25dd75133d1b5afcbeba | [] | no_license | pedrofolch/digitalsoil | 79d9497dcbb54df3c7df64f9da35d71d592fe580 | 7b6d1ffd34e991cf87c91342e5336a97fa1cf59b | refs/heads/master | 2022-12-11T00:47:01.728729 | 2019-04-11T03:34:12 | 2019-04-11T03:34:12 | 120,937,159 | 0 | 0 | null | 2022-12-08T04:58:09 | 2018-02-09T17:49:10 | CSS | UTF-8 | Python | false | false | 659 | py | # Generated by Django 2.1.5 on 2019-03-31 02:16
import django.contrib.gis.db.models.fields
from django.db import migrations
# Auto-generated schema migration; by convention Django migrations are not
# hand-edited once applied, so only comments are added here.
class Migration(migrations.Migration):
# Must be applied on top of the app's initial migration.
dependencies = [
('shops', '0001_initial'),
]
# Both operations rewrite existing GIS fields as nullable, SRID 4326
# (WGS 84 longitude/latitude).
operations = [
migrations.AlterField(
model_name='elevation',
name='rast',
field=django.contrib.gis.db.models.fields.RasterField(blank=True, null=True, srid=4326),
),
migrations.AlterField(
model_name='shop',
name='poly',
field=django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326),
),
]
| [
"[email protected]"
] | |
e4f668c29f8509034aa848c7bc5ab56a68eb64c4 | ca0e761b2948b2bd93d46e5bab610901f4a9936c | /data/convert_to_json_file.py | 6e7ef5a2f0f1f046e2029916e023b0653ab42ed7 | [
"MIT"
] | permissive | dojinkimm/go-krx | 7f122321f69a119594de1ee184be57eeb4f148f7 | b565696a7c13427f3320c6c43a529638ea06682e | refs/heads/main | 2023-03-11T22:06:00.291164 | 2021-02-28T13:29:51 | 2021-02-28T13:29:51 | 300,895,170 | 7 | 2 | null | 2021-02-23T12:34:18 | 2020-10-03T14:09:40 | Go | UTF-8 | Python | false | false | 1,695 | py | import json
import pandas as pd
# Download the KRX listed-company table (first table on the page).
dfstockcode = pd.read_html(
    "http://kind.krx.co.kr/corpgeneral/corpList.do?method=download", header=0
)[0]

stock_information = []


def _fill_missing(value):
    """Missing cells come back from read_html as float NaN; replace them
    with the literal '없음', mirroring the original `type(...) == float`
    check exactly."""
    return "없음" if type(value) == float else value


columns = zip(
    dfstockcode.get("회사명"),
    dfstockcode.get("종목코드"),
    dfstockcode.get("업종"),
    dfstockcode.get("주요제품"),
    dfstockcode.get("상장일"),
    dfstockcode.get("결산월"),
    dfstockcode.get("대표자명"),
    dfstockcode.get("홈페이지"),
    dfstockcode.get("지역"),
)

for (name, symbol, sector, industry, listing_date,
     settlement_month, representative, homepage, region) in columns:
    sector = _fill_missing(sector)
    industry = _fill_missing(industry)
    settlement_month = _fill_missing(settlement_month)
    representative = _fill_missing(representative)
    homepage = _fill_missing(homepage)
    region = _fill_missing(region)
    # Ticker symbols are zero-padded to six digits.
    symbol = str(symbol).zfill(6)
    stock_information.append(
        {
            "name": name,
            "symbol": symbol,
            "sector": sector,
            "industry": industry,
            "listing_date": listing_date,
            "settlement_month": settlement_month,
            "representative": representative,
            "homepage": homepage,
            "region": region,
        }
    )

# Pretty-print as UTF-8 JSON, keeping Korean text unescaped.
with open("data.json", "w", encoding='utf-8') as file:
    json.dump(stock_information, file, indent=4, ensure_ascii=False)
    file.write("\n")
| [
"[email protected]"
] | |
a27ffb478d2e67e0421e6bd0ec93873bf9393a62 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/lib2to3/tests/data/crlf.py | ae969da2ed9f77889127906baddd4a6ef5472fd3 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 127 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d910ad886333abf3664a4fb4290d3b81307a16c6d9ca14356b3644a9aae6e714
size 50
| [
"[email protected]"
] | |
7c691685311f964776bd731d24ea73ab2268ea4a | a6719f4815ff41d3a1f09e9a63a64c4582d03702 | /file_handling/read_file_demo.py | b131427b8bc801751761bb301ff7f1a6d3fecacc | [
"MIT"
] | permissive | thanh-vt/python-basic-programming | 8136007b8435dae6339ae33015fe536e21b19d1d | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | refs/heads/main | 2023-01-30T12:57:36.819687 | 2020-12-13T17:27:05 | 2020-12-13T17:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | f = open('demo_file.txt', 'r')
print(f.read())
f.close()  # the handle opened above was never closed

# read only part of a file (first 5 characters)
# Use a context manager so this second handle is closed as well.
with open('demo_file.txt', 'r') as f:
    print(f.read(5))
| [
"[email protected]"
] | |
686006acd784aeb64f48aa38eeb51d5c566319c7 | 1d11ff770c5530de4c18e83d9474d4c09c4376d2 | /igor/std-plugins/philips/scripts/philips.py | 0a6d1b43a4de640d5a4642c054379da4b21d6527 | [
"MIT"
] | permissive | bobandrey37/igor | 6660508639d90e7f44ea85146581685513b99ca2 | 41e163c8fa3da8ef13a337e1fe4268cf6fd7d07a | refs/heads/master | 2020-05-01T06:27:36.954089 | 2019-03-04T14:45:26 | 2019-03-04T14:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py | #!/usr/bin/python
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import socket
import struct
import select
import json
import urllib.request, urllib.parse, urllib.error
import sys
# Set to True (or pass -d on the command line) for protocol tracing.
DEBUG=False
# Candidate private-subnet prefixes.  NOTE(review): ORDER is never used in
# this file -- possibly leftover from an earlier discovery strategy.
ORDER = [
('192', '168', '1'),
('10', '0', '1'),
('10', '0', '2')
]
# Port of the TV's JointSpace HTTP API (also hard-coded in the URLs below).
JOINTSPACE_PORT=1925
# UDP port used for the "voodoo" discovery protocol.
VOODOO_PORT=2323
VOODOO_VERSION=0x03010401
VPMT_DISCOVER=1
# Pre-packed discovery datagram: version, message type, and id/name fields.
# NOTE(review): the '16s'/'96s' fields are passed as str, which struct.pack
# accepts only on Python 2 -- under Python 3 these would need to be bytes.
VOODOO_DISCOVER = struct.pack('<l28xll16s96s96s96s', VOODOO_VERSION, VPMT_DISCOVER, 0, '1234567890123456', 'Python Control', 'Jack', 'Philips.py')
# Thin client for a Philips TV: discovery over the UDP "voodoo" protocol,
# then control through the JointSpace HTTP/JSON API on port 1925.
class JointSpaceRemote(object):
# self.tv holds the TV's IP address once discovered (None until then).
# NOTE(review): the ipaddr parameter is accepted but ignored here.
def __init__(self, ipaddr=None):
self.tv = None
# Discover the TV; returns True on success, False if nothing answered.
def connect(self):
while not self.tv:
self.tv = self.findTV()
if self.tv:
break
if DEBUG: print("TV not found, is it turned on?'")
return False
return True
# Broadcast (or unicast) a discovery datagram and wait up to 5 seconds
# for a reply that did not come from one of this host's own addresses.
def findTV(self, ipaddr=None):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind(('', VOODOO_PORT))
if ipaddr:
sock.sendto(VOODOO_DISCOVER, (ipaddr, VOODOO_PORT))
else:
sock.sendto(VOODOO_DISCOVER, ('<broadcast>', VOODOO_PORT))
while True:
result = select.select([sock], [], [], 5)
if sock in result[0]:
msg, sender = sock.recvfrom(2000)
if DEBUG: print('Got message from', sender[0])
myHostName = socket.gethostname()
if not '.' in myHostName:
myHostName = myHostName + '.local'
if not sender[0] in socket.gethostbyname_ex(myHostName)[2]:
# It is not our own message. It must be the Philips TV.
return sender[0]
else:
break
return None
# HTTP GET <path> from the TV's JointSpace API, decoded from JSON.
def getData(self, path):
assert self.tv
url = 'http://%s:1925/1/%s' % (self.tv, path)
if DEBUG: print('GET', url)
data = urllib.request.urlopen(url).read()
##print 'RAW', data
data = json.loads(data)
##print 'DECODED', data
return data
# HTTP POST JSON-encoded <data> to <path> on the TV.
def putData(self, path, data):
assert self.tv
url = 'http://%s:1925/1/%s' % (self.tv, path)
data = json.dumps(data)
if DEBUG: print('POST %s DATA %s' % (url, data))
data = urllib.request.urlopen(url, data).read()
if data:
if DEBUG: print('PUTDATA RETURNED', data)
# Return (source-id, human-readable name) of what is currently shown;
# for the 'tv' source the current channel name is looked up.
def curWatching(self):
assert self.tv
data = self.getData('sources/current')
source = data['id']
if source == 'tv':
chanID = self.getData('channels/current')['id']
chanInfo = self.getData('channels/%s' % chanID)
name = chanInfo['name']
else:
names = self.getData('sources')
name = names[source]['name']
return source, name
def cmd_sources(self):
"""List available input sources"""
assert self.tv
data = self.getData('sources')
for source, descr in list(data.items()):
print('%s\t%s' % (source, descr['name']))
def cmd_channels(self):
"""List available TV channels"""
assert self.tv
data = self.getData('channels')
all = []
for fingerprint, descr in list(data.items()):
all.append((int(descr['preset']), descr['name']))
# Channels are printed sorted by preset number.
all.sort()
for preset, name in all:
print('%s\t%s' % (preset, name))
def cmd_source(self, source=None):
"""Set to the given input source (or print current source)"""
assert self.tv
if source:
self.putData('sources/current', {'id' : source })
else:
data = self.getData('sources/current')
print(data['id'])
def cmd_channel(self, channel=None):
"""Set to the given TV channel, by name, number or ID (or list current channel)"""
assert self.tv
if channel:
data = self.getData('channels')
for chID, chDescr in list(data.items()):
if chID == channel or chDescr['preset'] == channel or chDescr['name'] == channel:
# Switch channel, then make sure the 'tv' source is active.
self.putData('channels/current', { 'id' : chID })
self.putData('sources/current', {'id' : 'tv' })
return
print('No such channel: %s' % channel, file=sys.stderr)
else:
data = self.getData('channels/current')
chID = data['id']
data = self.getData('channels')
print('%s\t%s' % (data[chID]['preset'], data[chID]['name']))
def cmd_volume(self, volume=None):
"""Change volume on the TV"""
assert self.tv
if volume is None:
data = self.getData('audio/volume')
muted = ' (muted)' if data['muted'] else ''
print('%d%s' % (data['current'], muted))
else:
volume = int(volume)
self.putData('audio/volume', { 'muted' : False, 'current' : volume })
def cmd_json(self, data=None):
"""Return all data as a JSON object"""
# NOTE(review): passing `data` is effectively unsupported -- that
# branch parses it and then hits `assert 0`.
if data is None:
data = {}
volumeData = self.getData('audio/volume')
data['volume'] = volumeData['current']
data['muted'] = volumeData['muted']
data['source'] = self.getData('sources/current')['id']
data['power'] = True
data['ip-address'] = self.tv
data['url'] = 'http://%s:1925/1/' % (self.tv)
else:
jData = json.loads(data)
assert 0
print(json.dumps(data))
def cmd_help(self):
"""List available commands"""
# Introspect for cmd_* methods and print their docstrings.
for name in dir(self):
if name[:4] == 'cmd_':
method = getattr(self, name)
doc = method.__doc__
print('%s\t%s' % (name[4:], doc))
# Command-line entry point: an optional leading -d enables DEBUG tracing,
# the next argument selects a cmd_* method, remaining arguments are passed
# through to it.
def main():
if len(sys.argv) > 1 and sys.argv[1] == '-d':
global DEBUG
DEBUG=True
del sys.argv[1]
tv = JointSpaceRemote()
if not tv.connect():
# Keep `philips.py json` machine-readable even when the TV is off.
if len(sys.argv) == 2 and sys.argv[1] == 'json':
print('{"power":false}')
sys.exit(0)
print("TV not found, is it turned on?", file=sys.stderr)
sys.exit(1)
if len(sys.argv) <= 1:
# No command given: just show what is currently being watched.
print(tv.curWatching())
else:
cmdName = 'cmd_' + sys.argv[1]
if not hasattr(tv, cmdName):
print('Unknown command: %s. Use help for help' % sys.argv[1], file=sys.stderr)
sys.exit(2)
cmd = getattr(tv, cmdName)
cmd(*sys.argv[2:])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
dd289bbe11d653c04e5f33bf697ff022530a0ef8 | b7eb8279ebe2f525d27849d6ca24cc7270d30433 | /processing/b2_demultiplex_stats.py | c941dc97629b4495d6d94f77ebdff996cd4bb1a9 | [] | no_license | maxwshen/prime-peptide | d0da277521537c6e09dfeca4afbe3297893ed61b | d72244e85683583c812d3bd106b6874da0a17b80 | refs/heads/main | 2023-04-07T19:07:03.371146 | 2021-04-09T20:36:07 | 2021-04-09T20:36:07 | 356,391,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
# Default params
# Input directory: output of the upstream b_demultiplex step.
inp_dir = _config.OUT_PLACE + 'b_demultiplex/'
# NAME comes from util.get_fn(__file__) -- presumably this script's file
# stem; this script's own output directory is OUT_PLACE/NAME/.
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
# Experiment design table; main() below iterates its 'Name' column.
exp_design = pd.read_csv(_config.DATA_DIR + 'exp_design.csv')
##
# Functions
##
def demultiplex_stats(nm):
    """Count reads in the R1 fastq files demultiplexed for sample `nm`.

    Warns for any fastq whose line count is odd, then prints the total
    read count (total lines / 4, since a fastq record is four lines).
    """
    sample_dir = inp_dir + nm + '/'
    num_lines = 0
    for fn in os.listdir(sample_dir):
        if 'R1' not in fn:
            continue
        lc = util.line_count(sample_dir + fn)
        if lc % 2 == 1:
            print('Error: fq num lines is odd')
        num_lines += lc
    # divide by 4 for fastq
    num_reads = num_lines / 4
    print(f'{nm}: {num_reads} reads')
    return
##
# qsub
##
# def gen_qsubs():
# # Generate qsub shell scripts and commands for easy parallelization
# print('Generating qsub scripts...')
# qsubs_dir = _config.QSUBS_DIR + NAME + '/'
# util.ensure_dir_exists(qsubs_dir)
# qsub_commands = []
# num_scripts = 0
# for idx in range(0, 60):
# command = 'python %s.py %s' % (NAME, idx)
# script_id = NAME.split('_')[0]
# # Write shell scripts
# sh_fn = qsubs_dir + 'q_%s_%s.sh' % (script_id, idx)
# with open(sh_fn, 'w') as f:
# f.write('#!/bin/bash\n%s\n' % (command))
# num_scripts += 1
# # Write qsub commands
# qsub_commands.append('qsub -V -wd %s %s' % (_config.SRC_DIR, sh_fn))
# # Save commands
# with open(qsubs_dir + '_commands.txt', 'w') as f:
# f.write('\n'.join(qsub_commands))
# print('Wrote %s shell scripts to %s' % (num_scripts, qsubs_dir))
# return
##
# Main
##
@util.time_dec
def main():
    """Report read counts for every sample in the design, plus 'other'."""
    print(NAME)
    for sample in exp_design['Name']:
        demultiplex_stats(sample)
    # the upstream demultiplex step also writes an 'other' bucket
    demultiplex_stats('other')
    return out_dir
if __name__ == '__main__':
if len(sys.argv) > 1:
# NOTE(review): main() takes no parameters, so this branch would raise
# TypeError; the split= keyword looks copied from a sibling script.
main(split = sys.argv[1])
else:
main()
"[email protected]"
] | |
6f6bbd7824afebb390fcad7b60006d07593eaeb0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005963.py | 536d9214289d3cb10209ba7b567a2e1a915c7dca | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
# Machine-generated many-to-one matcher (matchpy / sympy rubi); not meant
# to be hand-edited -- regenerate instead of modifying.
class CommutativeMatcher141988(CommutativeMatcher):
# Lazily-created singleton instance, see get().
_instance = None
# Pattern 0: two Mul-associated sequence variables, the second with a
# default value of S(1).
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141988._instance is None:
CommutativeMatcher141988._instance = CommutativeMatcher141988()
return CommutativeMatcher141988._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141987
# `return` before `yield` makes this a generator that yields nothing.
return
yield
from collections import deque | [
"[email protected]"
] | |
bf4938d9e73a26fe0757893e1a32b04c141a9cdb | d1845a132213f2239fb0fea3502982dcfbdaca08 | /youtube-favourites-export.py | 965eff77359c96636c638fe0f16b20fabf00c131 | [
"MIT"
] | permissive | dw/scratch | 361e9dac7693061b66ccd064633f4ed09875e1b2 | c22c84d4d2d0347283e70192458ea50e08efcadb | refs/heads/master | 2021-01-17T12:21:52.423557 | 2019-06-11T00:09:30 | 2019-06-11T00:09:30 | 3,239,854 | 30 | 10 | null | null | null | null | UTF-8 | Python | false | false | 982 | py |
import gdata.youtube.client
# Python 2 script: export a YouTube account's favorites and subscriptions
# to XML files via the (long-retired) GData API.
# NOTE(review): credentials are hard-coded placeholders; real secrets
# should never be committed to source control.
client = gdata.youtube.client.YouTubeClient()
client.client_login('[email protected]', 'password', 'exporter')
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/favorites'
# Follow the paged feed (50 entries per request) until there is no
# 'next' link, accumulating every entry.
while True:
print 'Fetch', uri
feed = client.get_videos(uri=uri, **{'max-results': 50})
entries += feed.entry
if not feed.get_next_link():
break
uri = feed.get_next_link().href
# Put the accumulated entries back into the last feed and serialize it.
feed.entry = entries
print 'total', len(entries)
with open('youtube-favorites.xml', 'w') as fp:
fp.write(feed.to_string())
# get subs
#
# Same pagination loop, now for the subscriptions feed.
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/subscriptions'
while True:
print 'Fetch', uri
feed = client.get_feed(uri=uri, **{'max-results': 50})
entries += feed.entry
if not feed.get_next_link():
break
uri = feed.get_next_link().href
feed.entry = entries
print 'total', len(entries)
with open('youtube-subs.xml', 'w') as fp:
fp.write(feed.to_string())
| [
"[email protected]"
] | |
676b57edf2543587624cb7fb53630425c91c775f | 7c1892d60f07848756cefe0dea0cce7292c7c572 | /database/add.py | a4e5e1046441fc9f119b92398f0c094ccabc923e | [] | no_license | cherryMonth/BWC | 31d92a583b0ff35a18368a2c2ccfdb8d549dd7e1 | 187430bbc9e81d1cbc8721fd423f9b0488e0e78d | refs/heads/master | 2021-01-01T17:29:17.655717 | 2017-07-28T04:40:20 | 2017-07-28T04:40:20 | 98,082,540 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | # coding=utf-8
import csv
import os
import collections
class Add(object):
    """Append dict rows to a CSV file, columns ordered by a char-sum key."""

    @staticmethod
    def add(filename, key_list=None):
        """Append the dicts in `key_list` as rows of CSV file `filename`.

        Column order is determined by sorting each dict's keys on the sum
        of their characters' ordinals.  A header row is written only when
        the file is first created.  Returns True on success and False on
        an I/O error.

        Ported from Python 2: the `print` statement and binary 'ab' CSV
        mode are replaced with their Python 3 equivalents.
        """
        # Decide on the header before open() creates the file.
        write_header = not os.path.exists(filename)
        try:
            # newline='' is the csv-module requirement for open() on Py3.
            with open(filename, 'a', newline='') as csv_file:
                if not key_list:
                    return True

                def char_sum(string):
                    """Sort key: sum of the ordinals of the characters."""
                    return sum(ord(ch) for ch in string)

                ordered_rows = []
                for row in key_list:
                    ordered = collections.OrderedDict(
                        sorted(row.items(), key=lambda item: char_sum(item[0])))
                    ordered_rows.append(ordered)
                # Column names come from the first row's (sorted) keys.
                row_name = list(ordered_rows[0].keys())
                writer = csv.DictWriter(csv_file, fieldnames=row_name)
                if write_header:
                    writer.writerow(dict(zip(row_name, row_name)))  # header
                for row in ordered_rows:
                    writer.writerow(row)  # data
                return True
        except IOError:
            print("File open error : " + filename + "\nplease check the filename")
            return False
# Manual smoke test: appends one teacher record to b.csv.
if __name__ == '__main__':
Add().add('b.csv',[{'WeChatID': 'wonka80', 'TeacherName': '王珂'}])
| [
"[email protected]"
] | |
fcb0ac9a2e90fb3003f163171bdf3f9429306a81 | e43ff8f429a6938a4f16edc4b2c94976acbff157 | /ABC/HELLO SPACE/c.py | 7e33c484056d96fed727123096a19a47f8c58635 | [] | no_license | Soule50431/AtCoder | 4fcd6ab6b771d55c90dc62aedd75eb81fd067466 | 118ac5d03630ce143fb50153402eee38e988ae0c | refs/heads/master | 2023-06-18T13:07:13.843361 | 2021-07-14T01:56:20 | 2021-07-14T01:56:20 | 357,827,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | n = int(input())
# One tuple of ints per member; check() below reads indices 0-4 of each.
team = [tuple(map(int, input().split())) for i in range(n)]
def check(x):
    """Can three members (repetition allowed) cover all 5 fields at >= x?

    Each member is reduced to a 5-bit mask of the fields where their
    value is at least x; three masks OR-ing to 0b11111 (= 31) means
    full coverage.
    """
    masks = set()
    for member in team:
        mask = 0
        for bit in range(5):
            if member[bit] >= x:
                mask |= 1 << bit
        masks.add(mask)
    return any(p | q | r == 31 for p in masks for q in masks for r in masks)
# Binary search for the largest x with check(x) True:
# ok stays feasible, ng stays infeasible, until they are adjacent.
ok = 0
ng = 10**9 + 1
while ng - ok > 1:
mid = (ng + ok) // 2
if check(mid):
ok = mid
else:
ng = mid
print(ok) | [
"[email protected]"
] | |
9f7513aceb03d3a629148eb93790f2bd922608ca | 6c2ecefb12be6b04f597e3fb887d9389050aa7e1 | /DjangoCourse/第三周/djangologin/djangologin/settings.py | ca207ee45368d5f381d99a9266ac9e571e9357b6 | [] | no_license | GmyLsh/learngit | 99d3c75843d2b0b873f26e098025832985c635b3 | 3e7993c7119b79216fea24e5e35035336e4f5f5b | refs/heads/master | 2020-04-12T09:11:55.068312 | 2018-12-19T07:19:42 | 2018-12-19T07:19:42 | 162,395,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,616 | py | """
Django settings for djangologin project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = '5cwed$8ury*r$q%)b-vm$(x@z_sqrja($d)nxu#of#&+(3zwg1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Project-local apps.
'login.apps.LoginConfig',
'hashlogin.apps.HashloginConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangologin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Templates live in the project-root templates/ directory.
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangologin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# NOTE(review): hard-coded MySQL root credentials -- move to environment
# configuration before deploying.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'login',
'USER':'root',
'PASSWORD':'123456',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# URL prefix for per-app static/ directories.
STATIC_URL = '/static/'
# Additional static files under the project root.
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
# Override the default user model with a custom one.
# Syntax: '<app name>.<custom user model name>'
AUTH_USER_MODEL='login.UserModel'
# The @login_required decorator requires LOGIN_URL; Django redirects
# unauthenticated users to this address automatically.
LOGIN_URL='/login/' | [
"[email protected]"
] | |
b7ea7c196a657c03362e5a72b8dc3b5a15f15f9c | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_group_criterion_simulation_service/transports/base.py | ecc835952d95a794ce1333feaca5eb673c52f842 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import ad_group_criterion_simulation
from google.ads.googleads.v6.services.types import ad_group_criterion_simulation_service
# Build default client metadata carrying the installed google-ads version;
# fall back to an empty ClientInfo when the distribution is not installed
# (e.g. when running from source).
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads-googleads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Machine-generated transport base class; regenerate rather than hand-edit.
class AdGroupCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupCriterionSimulationService."""
# OAuth scope requested when falling back to default credentials.
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group_criterion_simulation: gapic_v1.method.wrap_method(
self.get_ad_group_criterion_simulation,
default_timeout=None,
client_info=client_info,
),
}
# Concrete transports (e.g. gRPC) must supply the actual RPC callable.
@property
def get_ad_group_criterion_simulation(self) -> typing.Callable[
[ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest],
ad_group_criterion_simulation.AdGroupCriterionSimulation]:
raise NotImplementedError
# Public API of this module.
__all__ = (
'AdGroupCriterionSimulationServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
49c05d2676b8eed51218f2ef3306bf504397a1b1 | 0f0a7adfae45e07a896c5cd5648ae081d4ef7790 | /python数据结构/python黑马数据结构/排序于搜索/桶排序2.py | 081bee496e92f52adc6aa7b5f6d0b08d0687b4c3 | [] | no_license | renlei-great/git_window- | e2c578544c7a8bdd97a7a9da7be0464d6955186f | 8bff20a18d7bbeeaf714aa49bf15ab706153cc28 | refs/heads/master | 2021-07-19T13:09:01.075494 | 2020-06-13T06:14:37 | 2020-06-13T06:14:37 | 227,722,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | lista = [12, 4, 5, 6, 22, 3, 3, 3, 3, 43, 654, 765, 7, 234]
def pail_sort(alist):
    """Bucket/counting sort for non-negative integers; returns a new list.

    Fixes to the original implementation:
    * the maximum was found by scanning for a descending adjacent pair,
      which leaves max_num unbound (UnboundLocalError) on already-sorted
      input and can pick a non-maximal value -- max() is used instead;
    * appending via exec()'d generated source is replaced with a plain
      list.extend, and the debug prints are dropped;
    * empty input now returns [] instead of crashing.
    """
    if not alist:
        return []
    # counts[v] = number of occurrences of value v (requires v >= 0).
    counts = [0] * (max(alist) + 1)
    for value in alist:
        counts[value] += 1
    sort_num = []
    for value, count in enumerate(counts):
        if count:
            sort_num.extend([value] * count)
    return sort_num
if __name__ == "__main__":
new_li = pail_sort(lista)
print(new_li) | [
"[email protected]"
] | |
b12de6c619935508db19c3f39260210233e6a4ab | a0801d0e7325b31f0383fc68517e208680bb36d6 | /Kattis/rijeci.py | 7d0bdadf29031e7404c2c5a61ad6cc2e938add57 | [] | no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | K = int(raw_input())
# Python 2, Kattis 'rijeci': after K presses the two counters follow a
# Fibonacci-style recurrence.  x and y hold the count pairs for the two
# most recent steps.
if K == 1:
print 0, 1
else:
x = [0, 1]
y = [1, 1]
for i in range(0, K - 1):
# the next pair is the element-wise sum of the previous two
t = [y[0]+x[0], y[1]+x[1]]
x = y
y = t
print x[0], y[0] | [
"[email protected]"
] | |
e1c3075f706667755ba59b0caaaedb0ba5b258d1 | 039c28f0903a2b87ef1439a991e7c2e1d898ab48 | /pyneqsys/_release.py | a9edcd4240cbd989149d9c51908621ebb04cd636 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andim/pyneqsys | f6ddae44ef0f0fdc41725b1cc6007834b790a4fa | f22fb840692a174826bd4d0a03a52e41e63d062f | refs/heads/master | 2021-05-05T19:22:59.256424 | 2018-01-08T23:23:33 | 2018-01-08T23:23:33 | 117,774,582 | 0 | 0 | null | 2018-01-17T02:53:21 | 2018-01-17T02:53:21 | null | UTF-8 | Python | false | false | 26 | py | __version__ = '0.6.0.git'
| [
"[email protected]"
] | |
50f5477a0bbb10e0d356fbe8aa777cae29d9dffa | 6ec91b363b077bffd33f15300a0935124e9fb915 | /Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py | a7e2879f610fe04793e1b1f2c35318dc4b3ff0fc | [] | no_license | lzxyzq/Cracking_the_Coding_Interview | 03232515ae8eb50394d46322d36b230d1a626fcf | 79dee7dab41830c4ff9e38858dad229815c719a0 | refs/heads/master | 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | '''
@Author: your name
@Date: 2020-06-30 18:43:37
@LastEditTime: 2020-06-30 19:39:19
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py
'''
# Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
# For example, given the following triangle
# [
# [2],
# [3,4],
# [6,5,7],
# [4,1,8,3]
# ]
# The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
for i in range(len(triangle)-2,-1,-1):
for j in range(i+1):
triangle[i][j] += min(triangle[i+1][j],triangle[i+1][j+1])
return triangle[0][0] | [
"[email protected]"
] | |
6c965678baa7cebf2a03764ddb7523795f47ebf2 | 1885e952aa4a89f8b417b4c2e70b91bf1df887ff | /ABC096/A.py | cc0a107e3cc810c92a52750a636bff76ae2ca381 | [] | no_license | takumiw/AtCoder | 01ed45b4d537a42e1120b1769fe4eff86a8e4406 | 23b9c89f07db8dd5b5345d7b40a4bae6762b2119 | refs/heads/master | 2021-07-10T12:01:32.401438 | 2020-06-27T14:07:17 | 2020-06-27T14:07:17 | 158,206,535 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | a, b = map(int, input().split())
if b >= a:
print(a)
else:
print(a-1) | [
"[email protected]"
] | |
13f4acd3b7b06c62449a3ff575618e203428cc3d | c7066d3b72a54665d81de1d77d7bdcfd0ece7b42 | /python/ccxt/ascendex.py | c2951759419e5046421f16fd8bd991a4af0f59cb | [
"MIT"
] | permissive | blair/ccxt | cf09b7a604586c230e8cea2b6e4dbf6c3c3497ea | 3a6bd4efb78d01391f9a4ea43ec228b75ca24695 | refs/heads/master | 2023-09-03T21:09:44.447194 | 2023-08-26T19:01:14 | 2023-08-26T19:01:14 | 126,121,401 | 0 | 2 | MIT | 2018-03-21T04:02:57 | 2018-03-21T04:02:56 | null | UTF-8 | Python | false | false | 132,974 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.ascendex import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ascendex(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(ascendex, self).describe(), {
'id': 'ascendex',
'name': 'AscendEX',
'countries': ['SG'], # Singapore
# 8 requests per minute = 0.13333 per second => rateLimit = 750
# testing 400 works
'rateLimit': 400,
'certified': False,
'pro': True,
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': False,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createPostOnlyOrder': True,
'createReduceOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddresses': False,
'fetchDepositAddressesByNetwork': False,
'fetchDeposits': True,
'fetchDepositsWithdrawals': True,
'fetchDepositWithdrawFee': 'emulated',
'fetchDepositWithdrawFees': True,
'fetchFundingHistory': False,
'fetchFundingRate': 'emulated',
'fetchFundingRateHistory': False,
'fetchFundingRates': True,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarginMode': False,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchPosition': False,
'fetchPositionMode': False,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransactionFee': False,
'fetchTransactionFees': False,
'fetchTransactions': 'emulated',
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawal': False,
'fetchWithdrawals': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': True,
'setPositionMode': False,
'transfer': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1d',
'1w': '1w',
'1M': '1m',
},
'version': 'v2',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/112027508-47984600-8b48-11eb-9e17-d26459cc36c6.jpg',
'api': {
'rest': 'https://ascendex.com',
},
'test': {
'rest': 'https://api-test.ascendex-sandbox.com',
},
'www': 'https://ascendex.com',
'doc': [
'https://ascendex.github.io/ascendex-pro-api/#ascendex-pro-api-documentation',
],
'fees': 'https://ascendex.com/en/feerate/transactionfee-traderate',
'referral': {
'url': 'https://ascendex.com/en-us/register?inviteCode=EL6BXBQM',
'discount': 0.25,
},
},
'api': {
'v1': {
'public': {
'get': {
'assets': 1,
'products': 1,
'ticker': 1,
'barhist/info': 1,
'barhist': 1,
'depth': 1,
'trades': 1,
'cash/assets': 1, # not documented
'cash/products': 1, # not documented
'margin/assets': 1, # not documented
'margin/products': 1, # not documented
'futures/collateral': 1,
'futures/contracts': 1,
'futures/ref-px': 1,
'futures/market-data': 1,
'futures/funding-rates': 1,
'risk-limit-info': 1,
'exchange-info': 1,
},
},
'private': {
'get': {
'info': 1,
'wallet/transactions': 1,
'wallet/deposit/address': 1, # not documented
'data/balance/snapshot': 1,
'data/balance/history': 1,
},
'accountCategory': {
'get': {
'balance': 1,
'order/open': 1,
'order/status': 1,
'order/hist/current': 1,
'risk': 1,
},
'post': {
'order': 1,
'order/batch': 1,
},
'delete': {
'order': 1,
'order/all': 1,
'order/batch': 1,
},
},
'accountGroup': {
'get': {
'cash/balance': 1,
'margin/balance': 1,
'margin/risk': 1,
'futures/collateral-balance': 1,
'futures/position': 1,
'futures/risk': 1,
'futures/funding-payments': 1,
'order/hist': 1,
'spot/fee': 1,
},
'post': {
'transfer': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
},
},
},
},
'v2': {
'public': {
'get': {
'assets': 1,
'futures/contract': 1,
'futures/collateral': 1,
'futures/pricing-data': 1,
'futures/ticker': 1,
'risk-limit-info': 1,
},
},
'private': {
'data': {
'get': {
'order/hist': 1,
},
},
'get': {
'account/info': 1,
},
'accountGroup': {
'get': {
'order/hist': 1,
'futures/position': 1,
'futures/free-margin': 1,
'futures/order/hist/current': 1,
'futures/order/open': 1,
'futures/order/status': 1,
},
'post': {
'futures/isolated-position-margin': 1,
'futures/margin-type': 1,
'futures/leverage': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/open': 1,
'subuser/subuser-transfer': 1,
'subuser/subuser-transfer-hist': 1,
},
'delete': {
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/all': 1,
},
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
},
'precisionMode': TICK_SIZE,
'options': {
'account-category': 'cash', # 'cash', 'margin', 'futures' # obsolete
'account-group': None,
'fetchClosedOrders': {
'method': 'v2PrivateDataGetOrderHist', # 'v1PrivateAccountGroupGetAccountCategoryOrderHistCurrent'
},
'defaultType': 'spot', # 'spot', 'margin', 'swap'
'accountsByType': {
'spot': 'cash',
'swap': 'futures',
'future': 'futures',
'margin': 'margin',
},
'transfer': {
'fillResponseFromRequest': True,
},
'networks': {
'BSC': 'BEP20(BSC)',
'ARB': 'arbitrum',
'SOL': 'Solana',
'AVAX': 'avalanche C chain',
'OMNI': 'Omni',
},
'networksById': {
'BEP20(BSC)': 'BSC',
'arbitrum': 'ARB',
'Solana': 'SOL',
'avalanche C chain': 'AVAX',
'Omni': 'OMNI',
},
},
'exceptions': {
'exact': {
# not documented
'1900': BadRequest, # {"code":1900,"message":"Invalid Http Request Input"}
'2100': AuthenticationError, # {"code":2100,"message":"ApiKeyFailure"}
'5002': BadSymbol, # {"code":5002,"message":"Invalid Symbol"}
'6001': BadSymbol, # {"code":6001,"message":"Trading is disabled on symbol."}
'6010': InsufficientFunds, # {'code': 6010, 'message': 'Not enough balance.'}
'60060': InvalidOrder, # {'code': 60060, 'message': 'The order is already filled or canceled.'}
'600503': InvalidOrder, # {"code":600503,"message":"Notional is too small."}
# documented
'100001': BadRequest, # INVALID_HTTP_INPUT Http request is invalid
'100002': BadRequest, # DATA_NOT_AVAILABLE Some required data is missing
'100003': BadRequest, # KEY_CONFLICT The same key exists already
'100004': BadRequest, # INVALID_REQUEST_DATA The HTTP request contains invalid field or argument
'100005': BadRequest, # INVALID_WS_REQUEST_DATA Websocket request contains invalid field or argument
'100006': BadRequest, # INVALID_ARGUMENT The arugment is invalid
'100007': BadRequest, # ENCRYPTION_ERROR Something wrong with data encryption
'100008': BadSymbol, # SYMBOL_ERROR Symbol does not exist or not valid for the request
'100009': AuthenticationError, # AUTHORIZATION_NEEDED Authorization is require for the API access or request
'100010': BadRequest, # INVALID_OPERATION The action is invalid or not allowed for the account
'100011': BadRequest, # INVALID_TIMESTAMP Not a valid timestamp
'100012': BadRequest, # INVALID_STR_FORMAT str format does not
'100013': BadRequest, # INVALID_NUM_FORMAT Invalid number input
'100101': ExchangeError, # UNKNOWN_ERROR Some unknown error
'150001': BadRequest, # INVALID_JSON_FORMAT Require a valid json object
'200001': AuthenticationError, # AUTHENTICATION_FAILED Authorization failed
'200002': ExchangeError, # TOO_MANY_ATTEMPTS Tried and failed too many times
'200003': ExchangeError, # ACCOUNT_NOT_FOUND Account not exist
'200004': ExchangeError, # ACCOUNT_NOT_SETUP Account not setup properly
'200005': ExchangeError, # ACCOUNT_ALREADY_EXIST Account already exist
'200006': ExchangeError, # ACCOUNT_ERROR Some error related with error
'200007': ExchangeError, # CODE_NOT_FOUND
'200008': ExchangeError, # CODE_EXPIRED Code expired
'200009': ExchangeError, # CODE_MISMATCH Code does not match
'200010': AuthenticationError, # PASSWORD_ERROR Wrong assword
'200011': ExchangeError, # CODE_GEN_FAILED Do not generate required code promptly
'200012': ExchangeError, # FAKE_COKE_VERIFY
'200013': ExchangeError, # SECURITY_ALERT Provide security alert message
'200014': PermissionDenied, # RESTRICTED_ACCOUNT Account is restricted for certain activity, such, or withdraw.
'200015': PermissionDenied, # PERMISSION_DENIED No enough permission for the operation
'300001': InvalidOrder, # INVALID_PRICE Order price is invalid
'300002': InvalidOrder, # INVALID_QTY Order size is invalid
'300003': InvalidOrder, # INVALID_SIDE Order side is invalid
'300004': InvalidOrder, # INVALID_NOTIONAL Notional is too small or too large
'300005': InvalidOrder, # INVALID_TYPE Order typs is invalid
'300006': InvalidOrder, # INVALID_ORDER_ID Order id is invalid
'300007': InvalidOrder, # INVALID_TIME_IN_FORCE Time In Force in order request is invalid
'300008': InvalidOrder, # INVALID_ORDER_PARAMETER Some order parameter is invalid
'300009': InvalidOrder, # TRADING_VIOLATION Trading violation on account or asset
'300011': InsufficientFunds, # INVALID_BALANCE No enough account or asset balance for the trading
'300012': BadSymbol, # INVALID_PRODUCT Not a valid product supported by exchange
'300013': InvalidOrder, # INVALID_BATCH_ORDER Some or all orders are invalid in batch order request
'300014': InvalidOrder, # {"code":300014,"message":"Order price doesn't conform to the required tick size: 0.1","reason":"TICK_SIZE_VIOLATION"}
'300020': InvalidOrder, # TRADING_RESTRICTED There is some trading restriction on account or asset
'300021': InvalidOrder, # TRADING_DISABLED Trading is disabled on account or asset
'300031': InvalidOrder, # NO_MARKET_PRICE No market price for market type order trading
'310001': InsufficientFunds, # INVALID_MARGIN_BALANCE No enough margin balance
'310002': InvalidOrder, # INVALID_MARGIN_ACCOUNT Not a valid account for margin trading
'310003': InvalidOrder, # MARGIN_TOO_RISKY Leverage is too high
'310004': BadSymbol, # INVALID_MARGIN_ASSET This asset does not support margin trading
'310005': InvalidOrder, # INVALID_REFERENCE_PRICE There is no valid reference price
'510001': ExchangeError, # SERVER_ERROR Something wrong with server.
'900001': ExchangeError, # HUMAN_CHALLENGE Human change do not pass
},
'broad': {},
},
'commonCurrencies': {
'BOND': 'BONDED',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'PLN': 'Pollen',
},
})
def get_account(self, params={}):
# get current or provided bitmax sub-account
account = self.safe_value(params, 'account', self.options['account'])
lowercaseAccount = account.lower()
return self.capitalize(lowercaseAccount)
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: an associative dictionary of currencies
"""
assets = self.v1PublicGetAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode" : "LTCBULL",
# "assetName" : "3X Long LTC Token",
# "precisionScale" : 9,
# "nativeScale" : 4,
# "withdrawalFee" : "0.2",
# "minWithdrawalAmt" : "1.0",
# "status" : "Normal"
# },
# ]
# }
#
margin = self.v1PublicGetMarginAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"BTT",
# "borrowAssetCode":"BTT-B",
# "interestAssetCode":"BTT-I",
# "nativeScale":0,
# "numConfirmations":1,
# "withdrawFee":"100.0",
# "minWithdrawalAmt":"1000.0",
# "statusCode":"Normal",
# "statusMessage":"",
# "interestRate":"0.001"
# }
# ]
# }
#
cash = self.v1PublicGetCashAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"LTCBULL",
# "nativeScale":4,
# "numConfirmations":20,
# "withdrawFee":"0.2",
# "minWithdrawalAmt":"1.0",
# "statusCode":"Normal",
# "statusMessage":""
# }
# ]
# }
#
assetsData = self.safe_value(assets, 'data', [])
marginData = self.safe_value(margin, 'data', [])
cashData = self.safe_value(cash, 'data', [])
assetsById = self.index_by(assetsData, 'assetCode')
marginById = self.index_by(marginData, 'assetCode')
cashById = self.index_by(cashData, 'assetCode')
dataById = self.deep_extend(assetsById, marginById, cashById)
ids = list(dataById.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = dataById[id]
code = self.safe_currency_code(id)
scale = self.safe_string_2(currency, 'precisionScale', 'nativeScale')
precision = self.parse_number(self.parse_precision(scale))
fee = self.safe_number_2(currency, 'withdrawFee', 'withdrawalFee')
status = self.safe_string_2(currency, 'status', 'statusCode')
active = (status == 'Normal')
marginInside = ('borrowAssetCode' in currency)
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'margin': marginInside,
'name': self.safe_string(currency, 'assetName'),
'active': active,
'deposit': None,
'withdraw': None,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': precision,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'minWithdrawalAmt'),
'max': None,
},
},
'networks': {},
}
return result
def fetch_markets(self, params={}):
"""
retrieves data on all markets for ascendex
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
products = self.v1PublicGetProducts(params)
#
# {
# "code": 0,
# "data": [
# {
# "symbol": "LBA/BTC",
# "baseAsset": "LBA",
# "quoteAsset": "BTC",
# "status": "Normal",
# "minNotional": "0.000625",
# "maxNotional": "6.25",
# "marginTradable": False,
# "commissionType": "Quote",
# "commissionReserveRate": "0.001",
# "tickSize": "0.000000001",
# "lotSize": "1"
# },
# ]
# }
#
cash = self.v1PublicGetCashProducts(params)
#
# {
# "code": 0,
# "data": [
# {
# "symbol": "QTUM/BTC",
# "displayName": "QTUM/BTC",
# "domain": "BTC",
# "tradingStartTime": 1569506400000,
# "collapseDecimals": "0.0001,0.000001,0.00000001",
# "minQty": "0.000000001",
# "maxQty": "1000000000",
# "minNotional": "0.000625",
# "maxNotional": "12.5",
# "statusCode": "Normal",
# "statusMessage": "",
# "tickSize": "0.00000001",
# "useTick": False,
# "lotSize": "0.1",
# "useLot": False,
# "commissionType": "Quote",
# "commissionReserveRate": "0.001",
# "qtyScale": 1,
# "priceScale": 8,
# "notionalScale": 4
# }
# ]
# }
#
perpetuals = self.v2PublicGetFuturesContract(params)
#
# {
# "code": 0,
# "data": [
# {
# "symbol": "BTC-PERP",
# "status": "Normal",
# "displayName": "BTCUSDT",
# "settlementAsset": "USDT",
# "underlying": "BTC/USDT",
# "tradingStartTime": 1579701600000,
# "priceFilter": {
# "minPrice": "1",
# "maxPrice": "1000000",
# "tickSize": "1"
# },
# "lotSizeFilter": {
# "minQty": "0.0001",
# "maxQty": "1000000000",
# "lotSize": "0.0001"
# },
# "commissionType": "Quote",
# "commissionReserveRate": "0.001",
# "marketOrderPriceMarkup": "0.03",
# "marginRequirements": [
# {
# "positionNotionalLowerBound": "0",
# "positionNotionalUpperBound": "50000",
# "initialMarginRate": "0.01",
# "maintenanceMarginRate": "0.006"
# },
# ...
# ]
# }
# ]
# }
#
productsData = self.safe_value(products, 'data', [])
productsById = self.index_by(productsData, 'symbol')
cashData = self.safe_value(cash, 'data', [])
perpetualsData = self.safe_value(perpetuals, 'data', [])
cashAndPerpetualsData = self.array_concat(cashData, perpetualsData)
cashAndPerpetualsById = self.index_by(cashAndPerpetualsData, 'symbol')
dataById = self.deep_extend(productsById, cashAndPerpetualsById)
ids = list(dataById.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = dataById[id]
settleId = self.safe_value(market, 'settlementAsset')
settle = self.safe_currency_code(settleId)
status = self.safe_string(market, 'status')
domain = self.safe_string(market, 'domain')
active = False
if ((status == 'Normal') or (status == 'InternalTrading')) and (domain != 'LeveragedETF'):
active = True
spot = settle is None
swap = not spot
linear = True if swap else None
minQty = self.safe_number(market, 'minQty')
maxQty = self.safe_number(market, 'maxQty')
minPrice = self.safe_number(market, 'tickSize')
maxPrice = None
underlying = self.safe_string_2(market, 'underlying', 'symbol')
parts = underlying.split('/')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if swap:
lotSizeFilter = self.safe_value(market, 'lotSizeFilter')
minQty = self.safe_number(lotSizeFilter, 'minQty')
maxQty = self.safe_number(lotSizeFilter, 'maxQty')
priceFilter = self.safe_value(market, 'priceFilter')
minPrice = self.safe_number(priceFilter, 'minPrice')
maxPrice = self.safe_number(priceFilter, 'maxPrice')
symbol = base + '/' + quote + ':' + settle
fee = self.safe_number(market, 'commissionReserveRate')
marginTradable = self.safe_value(market, 'marginTradable', False)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': 'swap' if swap else 'spot',
'spot': spot,
'margin': marginTradable if spot else None,
'swap': swap,
'future': False,
'option': False,
'active': active,
'contract': swap,
'linear': linear,
'inverse': not linear if swap else None,
'taker': fee,
'maker': fee,
'contractSize': self.parse_number('1') if swap else None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'lotSize'),
'price': self.safe_number(market, 'tickSize'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': minQty,
'max': maxQty,
},
'price': {
'min': minPrice,
'max': maxPrice,
},
'cost': {
'min': self.safe_number(market, 'minNotional'),
'max': self.safe_number(market, 'maxNotional'),
},
},
'info': market,
})
return result
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the ascendex server
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns int: the current integer timestamp in milliseconds from the ascendex server
"""
request = {
'requestTime': self.milliseconds(),
}
response = self.v1PublicGetExchangeInfo(self.extend(request, params))
#
# {
# "code": 0,
# "data": {
# "requestTimeEcho": 1656560463601,
# "requestReceiveAt": 1656560464331,
# "latency": 730
# }
# }
#
data = self.safe_value(response, 'data')
return self.safe_integer(data, 'requestReceiveAt')
def fetch_accounts(self, params={}):
"""
fetch all the accounts associated with a profile
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `account structures <https://github.com/ccxt/ccxt/wiki/Manual#account-structure>` indexed by the account type
"""
accountGroup = self.safe_string(self.options, 'account-group')
response = None
if accountGroup is None:
response = self.v1PrivateGetInfo(params)
#
# {
# "code":0,
# "data":{
# "email":"[email protected]",
# "accountGroup":8,
# "viewPermission":true,
# "tradePermission":true,
# "transferPermission":true,
# "cashAccount":["cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda"],
# "marginAccount":["martXoh1v1N3EMQC5FDtSj5VHso8aI2Z"],
# "futuresAccount":["futc9r7UmFJAyBY2rE3beA2JFxav2XFF"],
# "userUID":"U6491137460"
# }
# }
#
data = self.safe_value(response, 'data', {})
accountGroup = self.safe_string(data, 'accountGroup')
self.options['account-group'] = accountGroup
return [
{
'id': accountGroup,
'type': None,
'currency': None,
'info': response,
},
]
def parse_balance(self, response):
timestamp = self.milliseconds()
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
balances = self.safe_value(response, 'data', [])
for i in range(0, len(balances)):
balance = balances[i]
code = self.safe_currency_code(self.safe_string(balance, 'asset'))
account = self.account()
account['free'] = self.safe_string(balance, 'availableBalance')
account['total'] = self.safe_string(balance, 'totalBalance')
result[code] = account
return self.safe_balance(result)
def parse_margin_balance(self, response):
timestamp = self.milliseconds()
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
balances = self.safe_value(response, 'data', [])
for i in range(0, len(balances)):
balance = balances[i]
code = self.safe_currency_code(self.safe_string(balance, 'asset'))
account = self.account()
account['free'] = self.safe_string(balance, 'availableBalance')
account['total'] = self.safe_string(balance, 'totalBalance')
debt = self.safe_string(balance, 'borrowed')
interest = self.safe_string(balance, 'interest')
account['debt'] = Precise.string_add(debt, interest)
result[code] = account
return self.safe_balance(result)
def parse_swap_balance(self, response):
timestamp = self.milliseconds()
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
data = self.safe_value(response, 'data', {})
collaterals = self.safe_value(data, 'collaterals', [])
for i in range(0, len(collaterals)):
balance = collaterals[i]
code = self.safe_currency_code(self.safe_string(balance, 'asset'))
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
self.load_accounts()
query = None
marketType = None
marketType, query = self.handle_market_type_and_params('fetchBalance', None, params)
isMargin = self.safe_value(params, 'margin', False)
marketType = 'margin' if isMargin else marketType
params = self.omit(params, 'margin')
options = self.safe_value(self.options, 'fetchBalance', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, marketType, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetBalance')
method = self.get_supported_mapping(marketType, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesPosition',
})
if (accountCategory == 'cash') or (accountCategory == 'margin'):
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# cash
#
# {
# 'code': 0,
# 'data': [
# {
# 'asset': 'BCHSV',
# 'totalBalance': '64.298000048',
# 'availableBalance': '64.298000048',
# },
# ]
# }
#
# margin
#
# {
# 'code': 0,
# 'data': [
# {
# 'asset': 'BCHSV',
# 'totalBalance': '64.298000048',
# 'availableBalance': '64.298000048',
# 'borrowed': '0',
# 'interest': '0',
# },
# ]
# }
#
# swap
#
# {
# "code": 0,
# "data": {
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "collaterals": [
# {"asset":"ADA","balance":"0.355803","referencePrice":"1.05095","discountFactor":"0.9"},
# {"asset":"USDT","balance":"0.000014519","referencePrice":"1","discountFactor":"1"}
# ],
# }j
# }
#
if marketType == 'swap':
return self.parse_swap_balance(response)
elif marketType == 'margin':
return self.parse_margin_balance(response)
else:
return self.parse_balance(response)
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v1PublicGetDepth(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "m":"depth-snapshot",
# "symbol":"BTC-PERP",
# "data":{
# "ts":1590223998202,
# "seqnum":115444921,
# "asks":[
# ["9207.5","18.2383"],
# ["9207.75","18.8235"],
# ["9208","10.7873"],
# ],
# "bids":[
# ["9207.25","0.4009"],
# ["9207","0.003"],
# ["9206.5","0.003"],
# ]
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
orderbook = self.safe_value(data, 'data', {})
timestamp = self.safe_integer(orderbook, 'ts')
result = self.parse_order_book(orderbook, symbol, timestamp)
result['nonce'] = self.safe_integer(orderbook, 'seqnum')
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"QTUM/BTC",
# "open":"0.00016537",
# "close":"0.00019077",
# "high":"0.000192",
# "low":"0.00016537",
# "volume":"846.6",
# "ask":["0.00018698","26.2"],
# "bid":["0.00018408","503.7"],
# "type":"spot"
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
type = self.safe_string(ticker, 'type')
delimiter = '/' if (type == 'spot') else None
symbol = self.safe_symbol(marketId, market, delimiter)
close = self.safe_string(ticker, 'close')
bid = self.safe_value(ticker, 'bid', [])
ask = self.safe_value(ticker, 'ask', [])
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': None,
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(bid, 0),
'bidVolume': self.safe_string(bid, 1),
'ask': self.safe_string(ask, 0),
'askVolume': self.safe_string(ask, 1),
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}, market)
def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v1PublicGetTicker(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "symbol":"BTC-PERP", # or "BTC/USDT"
# "open":"9073",
# "close":"9185.75",
# "high":"9185.75",
# "low":"9185.75",
# "volume":"576.8334",
# "ask":["9185.75","15.5863"],
# "bid":["9185.5","0.003"],
# "type":"derivatives", # or "spot"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
see https://ascendex.github.io/ascendex-pro-api/#ticker
see https://ascendex.github.io/ascendex-futures-pro-api-v2/#ticker
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
request = {}
market = None
if symbols is not None:
symbol = self.safe_value(symbols, 0)
market = self.market(symbol)
marketIds = self.market_ids(symbols)
request['symbol'] = ','.join(marketIds)
type = None
type, params = self.handle_market_type_and_params('fetchTickers', market, params)
response = None
if type == 'spot':
response = self.v1PublicGetTicker(self.extend(request, params))
else:
response = self.v2PublicGetFuturesTicker(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "symbol":"QTUM/BTC",
# "open":"0.00016537",
# "close":"0.00019077",
# "high":"0.000192",
# "low":"0.00016537",
# "volume":"846.6",
# "ask":["0.00018698","26.2"],
# "bid":["0.00018408","503.7"],
# "type":"spot"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
if not isinstance(data, list):
return self.parse_tickers([data], symbols)
return self.parse_tickers(data, symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "m":"bar",
# "s":"BTC/USDT",
# "data":{
# "i":"1",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
#
data = self.safe_value(ohlcv, 'data', {})
return [
self.safe_integer(data, 'ts'),
self.safe_number(data, 'o'),
self.safe_number(data, 'h'),
self.safe_number(data, 'l'),
self.safe_number(data, 'c'),
self.safe_number(data, 'v'),
]
def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.safe_string(self.timeframes, timeframe, timeframe),
}
# if since and limit are not specified
# the exchange will return just 1 last candle by default
duration = self.parse_timeframe(timeframe)
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultLimit = self.safe_integer(options, 'limit', 500)
if since is not None:
request['from'] = since
if limit is None:
limit = defaultLimit
else:
limit = min(limit, defaultLimit)
request['to'] = self.sum(since, limit * duration * 1000, 1)
elif limit is not None:
request['n'] = limit # max 500
response = self.v1PublicGetBarhist(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "m":"bar",
# "s":"BTC/USDT",
# "data":{
# "i":"1",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "p":"9128.5", # price
# "q":"0.0030", # quantity
# "ts":1590229002385, # timestamp
# "bm":false, # if True, the buyer is the market maker, we only use self field to "define the side" of a public trade
# "seqnum":180143985289898554
# }
#
timestamp = self.safe_integer(trade, 'ts')
priceString = self.safe_string_2(trade, 'price', 'p')
amountString = self.safe_string(trade, 'q')
buyerIsMaker = self.safe_value(trade, 'bm', False)
side = 'sell' if buyerIsMaker else 'buy'
market = self.safe_market(None, market)
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': None,
'order': None,
'type': None,
'takerOrMaker': None,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
see https://ascendex.github.io/ascendex-pro-api/#market-trades
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns Trade[]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['n'] = limit # max 100
response = self.v1PublicGetTrades(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "m":"trades",
# "symbol":"BTC-PERP",
# "data":[
# {"p":"9128.5","q":"0.0030","ts":1590229002385,"bm":false,"seqnum":180143985289898554},
# {"p":"9129","q":"0.0030","ts":1590229002642,"bm":false,"seqnum":180143985289898587},
# {"p":"9129.5","q":"0.0030","ts":1590229021306,"bm":false,"seqnum":180143985289899043}
# ]
# }
# }
#
records = self.safe_value(response, 'data', [])
trades = self.safe_value(records, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
statuses = {
'PendingNew': 'open',
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Canceled': 'canceled',
'Rejected': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "16e607e2b83a8bXHbAwwoqDo55c166fa",
# "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830",
# "orderType": "Market",
# "symbol": "BTC/USDT",
# "timestamp": 1573576916201
# }
#
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640819389454,
# "orderId": "a17e0874ecbdU0711043490bbtcpDU5X",
# "seqNum": -1,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.002",
# "stopPrice": "0",
# "stopBy": "ref-px",
# "status": "Ack",
# "lastExecTime": 1640819389454,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "symbol": "BTC/USDT",
# "price": "8131.22",
# "orderQty": "0.00082",
# "orderType": "Market",
# "avgPx": "7392.02",
# "cumFee": "0.005152238",
# "cumFilledQty": "0.00082",
# "errorCode": "",
# "feeAsset": "USDT",
# "lastExecTime": 1575953151764,
# "orderId": "a16eee20b6750866943712zWEDdAjt3",
# "seqNum": 2623469,
# "side": "Buy",
# "status": "Filled",
# "stopPrice": "",
# "execInst": "NULL_VAL" # "Post"(for postOnly orders), "reduceOnly"(for reduceOnly orders)
# }
#
# {
# "orderId": "a173ad938fc3U22666567717788c3b66", # orderId
# "seqNum": 18777366360, # sequence number
# "accountId": "cshwSjbpPjSwHmxPdz2CPQVU9mnbzPpt", # accountId
# "symbol": "BTC/USDT", # symbol
# "orderType": "Limit", # order type(Limit/Market/StopMarket/StopLimit)
# "side": "Sell", # order side(Buy/Sell)
# "price": "11346.77", # order price
# "stopPrice": "0", # stop price(0 by default)
# "orderQty": "0.01", # order quantity(in base asset)
# "status": "Canceled", # order status(Filled/Canceled/Rejected)
# "createTime": 1596344995793, # order creation time
# "lastExecTime": 1596344996053, # last execution time
# "avgFillPrice": "11346.77", # average filled price
# "fillQty": "0.01", # filled quantity(in base asset)
# "fee": "-0.004992579", # cummulative fee. if negative, self value is the commission charged; if possitive, self value is the rebate received.
# "feeAsset": "USDT" # fee asset
# }
#
# {
# "ac": "FUTURES",
# "accountId": "testabcdefg",
# "avgPx": "0",
# "cumFee": "0",
# "cumQty": "0",
# "errorCode": "NULL_VAL",
# "execInst": "NULL_VAL",
# "feeAsset": "USDT",
# "lastExecTime": 1584072844085,
# "orderId": "r170d21956dd5450276356bbtcpKa74",
# "orderQty": "1.1499",
# "orderType": "Limit",
# "price": "4000",
# "sendingTime": 1584072841033,
# "seqNum": 24105338,
# "side": "Buy",
# "status": "Canceled",
# "stopPrice": "",
# "symbol": "BTC-PERP"
# },
#
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '/')
timestamp = self.safe_integer_2(order, 'timestamp', 'sendingTime')
lastTradeTimestamp = self.safe_integer(order, 'lastExecTime')
if timestamp is None:
timestamp = lastTradeTimestamp
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
average = self.safe_string(order, 'avgPx')
filled = self.safe_string_n(order, ['cumFilledQty', 'cumQty', 'fillQty'])
id = self.safe_string(order, 'orderId')
clientOrderId = self.safe_string(order, 'id')
if clientOrderId is not None:
if len(clientOrderId) < 1:
clientOrderId = None
rawTypeLower = self.safe_string_lower(order, 'orderType')
type = rawTypeLower
if rawTypeLower is not None:
if rawTypeLower == 'stoplimit':
type = 'limit'
if rawTypeLower == 'stopmarket':
type = 'market'
side = self.safe_string_lower(order, 'side')
feeCost = self.safe_number_2(order, 'cumFee', 'fee')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
stopPrice = self.safe_number(order, 'stopPrice')
reduceOnly = None
execInst = self.safe_string(order, 'execInst')
if execInst == 'reduceOnly':
reduceOnly = True
postOnly = None
if execInst == 'Post':
postOnly = True
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': postOnly,
'reduceOnly': reduceOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'triggerPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `fee structures <https://github.com/ccxt/ccxt/wiki/Manual#fee-structure>` indexed by market symbols
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v1PrivateAccountGroupGetSpotFee(self.extend(request, params))
#
# {
# code: '0',
# data: {
# domain: 'spot',
# userUID: 'U1479576458',
# vipLevel: '0',
# fees: [
# {symbol: 'HT/USDT', fee: {taker: '0.001', maker: '0.001'}},
# {symbol: 'LAMB/BTC', fee: {taker: '0.002', maker: '0.002'}},
# {symbol: 'STOS/USDT', fee: {taker: '0.002', maker: '0.002'}},
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
fees = self.safe_value(data, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId, None, '/')
takerMaker = self.safe_value(fee, 'fee', {})
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(takerMaker, 'maker'),
'taker': self.safe_number(takerMaker, 'taker'),
}
return result
    def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
        """
        Create an order on the exchange
        :param str symbol: Unified CCXT market symbol
        :param str type: "limit" or "market"
        :param str side: "buy" or "sell"
        :param float amount: the amount of currency to trade
        :param float [price]: *ignored in "market" orders* the price at which the order is to be fullfilled at in units of the quote currency
        :param dict [params]: Extra parameters specific to the exchange API endpoint
        :param str [params.timeInForce]: "GTC", "IOC", "FOK", or "PO"
        :param bool [params.postOnly]: True or False
        :param float [params.stopPrice]: The price at which a trigger order is triggered at
        :returns: `An order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        marketType = None
        marketType, params = self.handle_market_type_and_params('createOrder', market, params)
        options = self.safe_value(self.options, 'createOrder', {})
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, marketType, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        # 'id' doubles as the clientOrderId field on self exchange
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'symbol': market['id'],
            'time': self.milliseconds(),
            'orderQty': self.amount_to_precision(symbol, amount),
            'orderType': type,  # limit, market, stop_market, stop_limit
            'side': side,  # buy or sell,
            # 'execInst':  # Post for postOnly, ReduceOnly for reduceOnly
            # 'respInst': 'ACK',  # ACK, 'ACCEPT, DONE
        }
        # 'stop_market' and 'stop_limit' are the trigger(conditional) variants of the two base types
        isMarketOrder = ((type == 'market') or (type == 'stop_market'))
        isLimitOrder = ((type == 'limit') or (type == 'stop_limit'))
        timeInForce = self.safe_string(params, 'timeInForce')
        postOnly = self.is_post_only(isMarketOrder, False, params)
        reduceOnly = self.safe_value(params, 'reduceOnly', False)
        stopPrice = self.safe_value_2(params, 'triggerPrice', 'stopPrice')
        params = self.omit(params, ['timeInForce', 'postOnly', 'reduceOnly', 'stopPrice', 'triggerPrice'])
        if reduceOnly:
            # reduceOnly is only meaningful on perpetual(swap) markets
            if marketType != 'swap':
                raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for perpetuals only')
            request['execInst'] = 'ReduceOnly'
        if isLimitOrder:
            request['orderPrice'] = self.price_to_precision(symbol, price)
        if timeInForce == 'IOC':
            request['timeInForce'] = 'IOC'
        if timeInForce == 'FOK':
            request['timeInForce'] = 'FOK'
        if postOnly:
            request['postOnly'] = True
        if stopPrice is not None:
            # a trigger price turns the order into its stop_* variant
            request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
            if isLimitOrder:
                request['orderType'] = 'stop_limit'
            elif isMarketOrder:
                request['orderType'] = 'stop_market'
        if clientOrderId is not None:
            request['id'] = clientOrderId
        # spot & margin orders go through the account-category endpoint,
        # swap orders through the account-group futures endpoint
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryPostOrder')
        method = self.get_supported_mapping(marketType, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupPostFuturesOrder',
        })
        if method == 'v1PrivateAccountCategoryPostOrder':
            # the v1 endpoint expects the category under 'category' instead of 'account-category'
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        # {
        #     "code":0,
        #     "data": {
        #         "accountId":"cshwT8RKojkT1HoaA5UdeimR2SrmHG2I",
        #         "ac":"CASH",
        #         "action":"place-order",
        #         "status":"Ack",
        #         "info": {
        #             "symbol":"TRX/USDT",
        #             "orderType":"StopLimit",
        #             "timestamp":1654290662172,
        #             "id":"",
        #             "orderId":"a1812b6840ddU8191168955av0k6Eyhj"
        #         }
        #     }
        # }
        #
        #
        # swap
        #
        # {
        #     "code":0,
        #     "data": {
        #         "meta": {
        #             "id":"",
        #             "action":"place-order",
        #             "respInst":"ACK"
        #         },
        #         "order": {
        #             "ac":"FUTURES",
        #             "accountId":"futwT8RKojkT1HoaA5UdeimR2SrmHG2I",
        #             "time":1654290969965,
        #             "orderId":"a1812b6cf322U8191168955oJamfTh7b",
        #             "seqNum":-1,
        #             "orderType":"StopLimit",
        #             "execInst":"NULL_VAL",
        #             "side":"Buy",
        #             "symbol":"TRX-PERP",
        #             "price":"0.083",
        #             "orderQty":"1",
        #             "stopPrice":"0.082",
        #             "stopBy":"ref-px",
        #             "status":"Ack",
        #             "lastExecTime":1654290969965,
        #             "lastQty":"0",
        #             "lastPx":"0",
        #             "avgFilledPx":"0",
        #             "cumFilledQty":"0",
        #             "fee":"0",
        #             "cumFee":"0",
        #             "feeAsset":"",
        #             "errorCode":"",
        #             "posStopLossPrice":"0",
        #             "posStopLossTrigger":"market",
        #             "posTakeProfitPrice":"0",
        #             "posTakeProfitTrigger":"market",
        #             "liquidityInd":"n"
        #         }
        #     }
        # }
        #
        # the order payload lives under 'order'(swap) or 'info'(spot)
        data = self.safe_value(response, 'data', {})
        order = self.safe_value_2(data, 'order', 'info', {})
        return self.parse_order(order, market)
    def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
        """
        fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        type, query = self.handle_market_type_and_params('fetchOrder', market, params)
        options = self.safe_value(self.options, 'fetchOrder', {})
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'orderId': id,
        }
        # spot & margin queries go through the account-category endpoint,
        # swap queries through the account-group futures endpoint
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderStatus')
        method = self.get_supported_mapping(type, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupGetFuturesOrderStatus',
        })
        if method == 'v1PrivateAccountCategoryGetOrderStatus':
            # the v1 endpoint expects the category under 'category' instead of 'account-category'
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, query))
        #
        # AccountCategoryGetOrderStatus
        #
        # {
        #     "code": 0,
        #     "accountCategory": "CASH",
        #     "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
        #     "data": [
        #         {
        #             "symbol":       "BTC/USDT",
        #             "price":        "8131.22",
        #             "orderQty":     "0.00082",
        #             "orderType":    "Market",
        #             "avgPx":        "7392.02",
        #             "cumFee":       "0.005152238",
        #             "cumFilledQty": "0.00082",
        #             "errorCode":    "",
        #             "feeAsset":     "USDT",
        #             "lastExecTime": 1575953151764,
        #             "orderId":      "a16eee20b6750866943712zWEDdAjt3",
        #             "seqNum":       2623469,
        #             "side":         "Buy",
        #             "status":       "Filled",
        #             "stopPrice":    "",
        #             "execInst":     "NULL_VAL"
        #         }
        #     ]
        # }
        #
        # AccountGroupGetFuturesOrderStatus
        #
        # {
        #     "code": 0,
        #     "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
        #     "ac": "FUTURES",
        #     "data": {
        #         "ac": "FUTURES",
        #         "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
        #         "time": 1640247020217,
        #         "orderId": "r17de65747aeU0711043490bbtcp0cmt",
        #         "seqNum": 28796162908,
        #         "orderType": "Limit",
        #         "execInst": "NULL_VAL",
        #         "side": "Buy",
        #         "symbol": "BTC-PERP",
        #         "price": "30000",
        #         "orderQty": "0.0021",
        #         "stopPrice": "0",
        #         "stopBy": "market",
        #         "status": "New",
        #         "lastExecTime": 1640247020232,
        #         "lastQty": "0",
        #         "lastPx": "0",
        #         "avgFilledPx": "0",
        #         "cumFilledQty": "0",
        #         "fee": "0",
        #         "cumFee": "0",
        #         "feeAsset": "USDT",
        #         "errorCode": "",
        #         "posStopLossPrice": "0",
        #         "posStopLossTrigger": "market",
        #         "posTakeProfitPrice": "0",
        #         "posTakeProfitTrigger": "market",
        #         "liquidityInd": "n"
        #     }
        # }
        #
        # NOTE(review): the spot example above shows a list under 'data' while the code
        # passes it to parse_order as-is — presumably a single-id query returns a dict; verify
        data = self.safe_value(response, 'data', {})
        return self.parse_order(data, market)
    def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
        """
        fetch all unfilled currently open orders
        :param str symbol: unified market symbol
        :param int [since]: the earliest time in ms to fetch open orders for
        :param int [limit]: the maximum number of open orders structures to retrieve
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = None
        if symbol is not None:
            market = self.market(symbol)
            symbol = market['symbol']
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        type, query = self.handle_market_type_and_params('fetchOpenOrders', market, params)
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
        }
        # spot & margin queries go through the account-category endpoint,
        # swap queries through the account-group futures endpoint
        options = self.safe_value(self.options, 'fetchOpenOrders', {})
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderOpen')
        method = self.get_supported_mapping(type, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupGetFuturesOrderOpen',
        })
        if method == 'v1PrivateAccountCategoryGetOrderOpen':
            # the v1 endpoint expects the category under 'category' instead of 'account-category'
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, query))
        #
        # AccountCategoryGetOrderOpen
        #
        # {
        #     "ac": "CASH",
        #     "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
        #     "code": 0,
        #     "data": [
        #         {
        #             "avgPx": "0",         # Average filled price of the order
        #             "cumFee": "0",       # cumulative fee paid for self order
        #             "cumFilledQty": "0",  # cumulative filled quantity
        #             "errorCode": "",     # error code; could be empty
        #             "feeAsset": "USDT",  # fee asset
        #             "lastExecTime": 1576019723550,   # The last execution time of the order
        #             "orderId": "s16ef21882ea0866943712034f36d83",  # server provided orderId
        #             "orderQty": "0.0083",  # order quantity
        #             "orderType": "Limit",  # order type
        #             "price": "7105",       # order price
        #             "seqNum": 8193258,     # sequence number
        #             "side": "Buy",         # order side
        #             "status": "New",       # order status on matching engine
        #             "stopPrice": "",       # only available for stop market and stop limit orders; otherwise empty
        #             "symbol": "BTC/USDT",
        #             "execInst": "NULL_VAL"  # execution instruction
        #         },
        #     ]
        # }
        #
        # AccountGroupGetFuturesOrderOpen
        #
        # {
        #     "code": 0,
        #     "data": [
        #         {
        #             "ac": "FUTURES",
        #             "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
        #             "time": 1640247020217,
        #             "orderId": "r17de65747aeU0711043490bbtcp0cmt",
        #             "seqNum": 28796162908,
        #             "orderType": "Limit",
        #             "execInst": "NULL_VAL",
        #             "side": "Buy",
        #             "symbol": "BTC-PERP",
        #             "price": "30000",
        #             "orderQty": "0.0021",
        #             "stopPrice": "0",
        #             "stopBy": "market",
        #             "status": "New",
        #             "lastExecTime": 1640247020232,
        #             "lastQty": "0",
        #             "lastPx": "0",
        #             "avgFilledPx": "0",
        #             "cumFilledQty": "0",
        #             "fee": "0",
        #             "cumFee": "0",
        #             "feeAsset": "USDT",
        #             "errorCode": "",
        #             "posStopLossPrice": "0",
        #             "posStopLossTrigger": "market",
        #             "posTakeProfitPrice": "0",
        #             "posTakeProfitTrigger": "market",
        #             "liquidityInd": "n"
        #         }
        #     ]
        # }
        #
        data = self.safe_value(response, 'data', [])
        # NOTE(review): presumably self.options['accountsByType'] maps 'swap' to 'futures' — verify
        if accountCategory == 'futures':
            return self.parse_orders(data, market, since, limit)
        # a workaround for https://github.com/ccxt/ccxt/issues/7187
        orders = []
        for i in range(0, len(data)):
            order = self.parse_order(data[i], market)
            orders.append(order)
        return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
see https://ascendex.github.io/ascendex-pro-api/#list-history-orders-v2
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
:param int [limit]: the maximum number of orde structures to retrieve
:param dict [params]: extra parameters specific to the ascendex api endpoint
:param int [params.until]: the latest time in ms to fetch orders for
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
# 'category': accountCategory,
# 'symbol': market['id'],
# 'orderType': 'market', # optional, string
# 'side': 'buy', # or 'sell', optional, case insensitive.
# 'status': 'Filled', # "Filled", "Canceled", or "Rejected"
# 'startTime': exchange.milliseconds(),
# 'endTime': exchange.milliseconds(),
# 'page': 1,
# 'pageSize': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type, query = self.handle_market_type_and_params('fetchClosedOrders', market, params)
options = self.safe_value(self.options, 'fetchClosedOrders', {})
defaultMethod = self.safe_string(options, 'method', 'v2PrivateDataGetOrderHist')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderHistCurrent',
})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash') # margin, futures
if method == 'v2PrivateDataGetOrderHist':
request['account'] = accountCategory
if limit is not None:
request['limit'] = limit
else:
request['account-category'] = accountCategory
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startTime'] = since
until = self.safe_string(params, 'until')
if until is not None:
request['endTime'] = until
response = getattr(self, method)(self.extend(request, query))
#
# accountCategoryGetOrderHistCurrent
#
# {
# "code":0,
# "accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda",
# "ac":"CASH",
# "data":[
# {
# "seqNum":15561826728,
# "orderId":"a17294d305c0U6491137460bethu7kw9",
# "symbol":"ETH/USDT",
# "orderType":"Limit",
# "lastExecTime":1591635618200,
# "price":"200",
# "orderQty":"0.1",
# "side":"Buy",
# "status":"Canceled",
# "avgPx":"0",
# "cumFilledQty":"0",
# "stopPrice":"",
# "errorCode":"",
# "cumFee":"0",
# "feeAsset":"USDT",
# "execInst":"NULL_VAL"
# }
# ]
# }
#
# {
# "code": 0,
# "data": [
# {
# "orderId" : "a173ad938fc3U22666567717788c3b66", # orderId
# "seqNum" : 18777366360, # sequence number
# "accountId" : "cshwSjbpPjSwHmxPdz2CPQVU9mnbzPpt", # accountId
# "symbol" : "BTC/USDT", # symbol
# "orderType" : "Limit", # order type(Limit/Market/StopMarket/StopLimit)
# "side" : "Sell", # order side(Buy/Sell)
# "price" : "11346.77", # order price
# "stopPrice" : "0", # stop price(0 by default)
# "orderQty" : "0.01", # order quantity(in base asset)
# "status" : "Canceled", # order status(Filled/Canceled/Rejected)
# "createTime" : 1596344995793, # order creation time
# "lastExecTime": 1596344996053, # last execution time
# "avgFillPrice": "11346.77", # average filled price
# "fillQty" : "0.01", # filled quantity(in base asset)
# "fee" : "-0.004992579", # cummulative fee. if negative, self value is the commission charged; if possitive, self value is the rebate received.
# "feeAsset" : "USDT" # fee asset
# }
# ]
# }
#
# accountGroupGetFuturesOrderHistCurrent
#
# {
# "code": 0,
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640245777002,
# "orderId": "r17de6444fa6U0711043490bbtcpJ2lI",
# "seqNum": 28796124902,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "Canceled",
# "lastExecTime": 1640246574886,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
isArray = isinstance(data, list)
if not isArray:
data = self.safe_value(data, 'data', [])
return self.parse_orders(data, market, since, limit)
    def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
        """
        cancels an open order
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        type, query = self.handle_market_type_and_params('cancelOrder', market, params)
        options = self.safe_value(self.options, 'cancelOrder', {})
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'symbol': market['id'],
            'time': self.milliseconds(),
            'id': 'foobar',  # placeholder for the required 'id' field, overwritten below when a clientOrderId is supplied
        }
        # spot & margin cancels go through the account-category endpoint,
        # swap cancels through the account-group futures endpoint
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrder')
        method = self.get_supported_mapping(type, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupDeleteFuturesOrder',
        })
        if method == 'v1PrivateAccountCategoryDeleteOrder':
            # the v1 endpoint expects the category under 'category' instead of 'account-category'
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        # the order can be addressed either by the server orderId or by the client-supplied id
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
        if clientOrderId is None:
            request['orderId'] = id
        else:
            request['id'] = clientOrderId
            params = self.omit(params, ['clientOrderId', 'id'])
        response = getattr(self, method)(self.extend(request, query))
        #
        # AccountCategoryDeleteOrder
        #
        # {
        #     "code": 0,
        #     "data": {
        #         "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
        #         "ac": "CASH",
        #         "action": "cancel-order",
        #         "status": "Ack",
        #         "info": {
        #             "id":        "wv8QGquoeamhssvQBeHOHGQCGlcBjj23",
        #             "orderId":   "16e6198afb4s8bXHbAwwoqDo2ebc19dc",
        #             "orderType": "",  # could be empty
        #             "symbol":    "ETH/USDT",
        #             "timestamp":  1573594877822
        #         }
        #     }
        # }
        #
        # AccountGroupDeleteFuturesOrder
        #
        # {
        #     "code": 0,
        #     "data": {
        #         "meta": {
        #             "id": "foobar",
        #             "action": "cancel-order",
        #             "respInst": "ACK"
        #         },
        #         "order": {
        #             "ac": "FUTURES",
        #             "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
        #             "time": 1640244480476,
        #             "orderId": "r17de63086f4U0711043490bbtcpPUF4",
        #             "seqNum": 28795959269,
        #             "orderType": "Limit",
        #             "execInst": "NULL_VAL",
        #             "side": "Buy",
        #             "symbol": "BTC-PERP",
        #             "price": "30000",
        #             "orderQty": "0.0021",
        #             "stopPrice": "0",
        #             "stopBy": "market",
        #             "status": "New",
        #             "lastExecTime": 1640244480491,
        #             "lastQty": "0",
        #             "lastPx": "0",
        #             "avgFilledPx": "0",
        #             "cumFilledQty": "0",
        #             "fee": "0",
        #             "cumFee": "0",
        #             "feeAsset": "BTCPC",
        #             "errorCode": "",
        #             "posStopLossPrice": "0",
        #             "posStopLossTrigger": "market",
        #             "posTakeProfitPrice": "0",
        #             "posTakeProfitTrigger": "market",
        #             "liquidityInd": "n"
        #         }
        #     }
        # }
        #
        # the cancelled order payload lives under 'order'(swap) or 'info'(spot)
        data = self.safe_value(response, 'data', {})
        order = self.safe_value_2(data, 'order', 'info', {})
        return self.parse_order(order, market)
    def cancel_all_orders(self, symbol: Optional[str] = None, params={}):
        """
        cancel all open orders
        :param str symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns dict[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
        options = self.safe_value(self.options, 'cancelAllOrders', {})
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'time': self.milliseconds(),
        }
        if symbol is not None:
            request['symbol'] = market['id']
        # spot & margin cancels go through the account-category endpoint,
        # swap cancels through the account-group futures endpoint
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrderAll')
        method = self.get_supported_mapping(type, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupDeleteFuturesOrderAll',
        })
        if method == 'v1PrivateAccountCategoryDeleteOrderAll':
            # the v1 endpoint expects the category under 'category' instead of 'account-category'
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, query))
        #
        # AccountCategoryDeleteOrderAll
        #
        # {
        #     "code": 0,
        #     "data": {
        #         "ac": "CASH",
        #         "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
        #         "action": "cancel-all",
        #         "info": {
        #             "id":  "2bmYvi7lyTrneMzpcJcf2D7Pe9V1P9wy",
        #             "orderId": "",
        #             "orderType": "NULL_VAL",
        #             "symbol": "",
        #             "timestamp": 1574118495462
        #         },
        #         "status": "Ack"
        #     }
        # }
        #
        # AccountGroupDeleteFuturesOrderAll
        #
        # {
        #     "code": 0,
        #     "data": {
        #         "ac": "FUTURES",
        #         "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
        #         "action": "cancel-all",
        #         "info": {
        #             "symbol":"BTC-PERP"
        #         }
        #     }
        # }
        #
        # the exchange only acknowledges the request, so the raw response is
        # returned instead of a list of parsed orders
        return response
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# destTag: "",
# tagType: "",
# tagId: "",
# chainName: "ERC20",
# numConfirmations: 20,
# withdrawalFee: 1,
# nativeScale: 4,
# tips: []
# }
#
address = self.safe_string(depositAddress, 'address')
tagId = self.safe_string(depositAddress, 'tagId')
tag = self.safe_string(depositAddress, tagId)
self.check_address(address)
code = None if (currency is None) else currency['code']
chainName = self.safe_string(depositAddress, 'chainName')
network = self.safe_network(chainName)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'info': depositAddress,
}
def safe_network(self, networkId):
    # Translate an exchange chain name into a unified network code.
    # Ids that are not listed here fall through unchanged.
    unifiedByExchangeId = {
        'TRC20': 'TRC20',
        'ERC20': 'ERC20',
        'GO20': 'GO20',
        'BEP2': 'BEP2',
        'BEP20(BSC)': 'BEP20',
        'Bitcoin': 'BTC',
        'Bitcoin ABC': 'BCH',
        'Litecoin': 'LTC',
        'Matic Network': 'MATIC',
        'Solana': 'SOL',
        'xDai': 'STAKE',
        'Akash': 'AKT',
    }
    return self.safe_string(unifiedByExchangeId, networkId, networkId)
def fetch_deposit_address(self, code: str, params={}):
    """
    fetch the deposit address for a currency associated with self account
    :param str code: unified currency code
    :param dict [params]: extra parameters specific to the ascendex api endpoint;
        may include 'chainName' to select a network when the currency has several
    :returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>`
    """
    self.load_markets()
    currency = self.currency(code)
    # 'chainName' is consumed locally and deliberately not forwarded to the endpoint
    chainName = self.safe_string(params, 'chainName')
    params = self.omit(params, 'chainName')
    request = {
        'asset': currency['id'],
    }
    response = self.v1PrivateGetWalletDepositAddress(self.extend(request, params))
    #
    # response: {"code": 0, "data": {"asset": "USDT", "assetName": "Tether",
    #     "address": [{"address": "1N22...", "destTag": "", "tagType": "",
    #         "tagId": "", "chainName": "Omni", "numConfirmations": 3,
    #         "withdrawalFee": 4.7, "nativeScale": 4, "tips": []}, ...]}}
    #
    data = self.safe_value(response, 'data', {})
    addresses = self.safe_value(data, 'address', [])
    numAddresses = len(addresses)
    address = None
    if numAddresses > 1:
        # multiple chains available - the caller has to disambiguate via 'chainName'
        addressesByChainName = self.index_by(addresses, 'chainName')
        if chainName is None:
            chainNames = list(addressesByChainName.keys())
            chains = ', '.join(chainNames)
            raise ArgumentsRequired(self.id + ' fetchDepositAddress() returned more than one address, a chainName parameter is required, one of ' + chains)
        address = self.safe_value(addressesByChainName, chainName, {})
    else:
        # first address
        address = self.safe_value(addresses, 0, {})
    result = self.parse_deposit_address(address, currency)
    return self.extend(result, {
        'info': response,
    })
def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
    """
    fetch all deposits made to an account
    :param str code: unified currency code
    :param int [since]: the earliest time in ms to fetch deposits for
    :param int [limit]: the maximum number of deposits structures to retrieve
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
    """
    # delegate to the shared transaction fetcher, constrained to deposits
    depositParams = self.extend({'txType': 'deposit'}, params)
    return self.fetch_transactions(code, since, limit, depositParams)
def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
    """
    fetch all withdrawals made from an account
    :param str code: unified currency code
    :param int [since]: the earliest time in ms to fetch withdrawals for
    :param int [limit]: the maximum number of withdrawals structures to retrieve
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
    """
    # delegate to the shared transaction fetcher, constrained to withdrawals
    withdrawalParams = self.extend({'txType': 'withdrawal'}, params)
    return self.fetch_transactions(code, since, limit, withdrawalParams)
def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
    """
    fetch history of deposits and withdrawals
    :param str [code]: unified currency code for the currency of the deposit/withdrawals, default is None
    :param int [since]: timestamp in ms of the earliest deposit/withdrawal, default is None
    :param int [limit]: max number of deposit/withdrawals to return, default is None
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a list of `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
    """
    self.load_markets()
    request = {
        # other parameters supported by the endpoint:
        # 'asset': currency['id'],
        # 'page': 1,
        # 'pageSize': 20,
        # 'startTs': self.milliseconds(),
        # 'endTs': self.milliseconds(),
        # 'txType': undefined,  # deposit, withdrawal
    }
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['asset'] = currency['id']
    if since is not None:
        request['startTs'] = since
    if limit is not None:
        request['pageSize'] = limit
    response = self.v1PrivateGetWalletTransactions(self.extend(request, params))
    #
    # response: {code: 0, data: {data: [{requestId, time, asset,
    #     transactionType: "deposit", amount: "25", commission: "0",
    #     networkTransactionId, status: "pending", numConfirmed: 8,
    #     numConfirmations: 20, destAddress: {address: "0x..."}}],
    #     page: 1, pageSize: 20, hasNext: False}}
    #
    data = self.safe_value(response, 'data', {})
    transactions = self.safe_value(data, 'data', [])
    return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
    # Map exchange transaction states to unified ccxt statuses;
    # unrecognized values are passed through unchanged.
    unifiedByExchangeStatus = {
        'reviewing': 'pending',
        'pending': 'pending',
        'confirmed': 'ok',
        'rejected': 'rejected',
    }
    return self.safe_string(unifiedByExchangeStatus, status, status)
def parse_transaction(self, transaction, currency=None):
    """
    Parse one raw wallet transaction into the unified transaction structure.

    Raw example: {requestId, time, asset, transactionType: "deposit",
    amount: "25", commission: "0", networkTransactionId, status: "pending",
    numConfirmed, numConfirmations, destAddress: {address, destTag}}
    """
    destAddress = self.safe_value(transaction, 'destAddress', {})
    address = self.safe_string(destAddress, 'address')
    tag = self.safe_string(destAddress, 'destTag')
    timestamp = self.safe_integer(transaction, 'time')
    currencyId = self.safe_string(transaction, 'asset')
    amountString = self.safe_string(transaction, 'amount')
    feeCostString = self.safe_string(transaction, 'commission')
    # the raw amount is gross; the unified 'amount' is reported net of the fee
    amountString = Precise.string_sub(amountString, feeCostString)
    code = self.safe_currency_code(currencyId, currency)
    return {
        'info': transaction,
        'id': self.safe_string(transaction, 'requestId'),
        'txid': self.safe_string(transaction, 'networkTransactionId'),
        'type': self.safe_string(transaction, 'transactionType'),
        'currency': code,
        'network': None,
        'amount': self.parse_number(amountString),
        'status': self.parse_transaction_status(self.safe_string(transaction, 'status')),
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'address': address,
        'addressFrom': None,
        'addressTo': address,
        'tag': tag,
        'tagFrom': None,
        'tagTo': tag,
        'updated': None,
        'comment': None,
        'fee': {
            'currency': code,
            'cost': self.parse_number(feeCostString),
            'rate': None,
        },
    }
def fetch_positions(self, symbols: Optional[List[str]] = None, params={}):
    """
    fetch all open positions
    :param str[]|None symbols: list of unified market symbols
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict[]: a list of `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
    """
    self.load_markets()
    self.load_accounts()
    # private endpoints require the caller's account-group id in the path
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    request = {
        'account-group': accountGroup,
    }
    response = self.v2PrivateAccountGroupGetFuturesPosition(self.extend(request, params))
    #
    # response: {"code": 0, "data": {"accountId", "ac": "FUTURES",
    #     "collaterals": [{"asset", "balance", "referencePrice", "discountFactor"}],
    #     "contracts": [{"symbol": "BTC-PERP", "side": "LONG", "position",
    #         "referenceCost", "unrealizedPnl", "realizedPnl", "avgOpenPrice",
    #         "marginType", "isolatedMargin", "leverage", "takeProfitPrice",
    #         "stopLossPrice", "buyOpenOrderNotional", "sellOpenOrderNotional",
    #         "markPrice", "indexPrice", ...}]}}
    #
    data = self.safe_value(response, 'data', {})
    position = self.safe_value(data, 'contracts', [])
    result = []
    for i in range(0, len(position)):
        result.append(self.parse_position(position[i]))
    symbols = self.market_symbols(symbols)
    return self.filter_by_array_positions(result, 'symbol', symbols, False)
def parse_position(self, position, market=None):
    """
    Parse one raw futures contract entry(see fetch_positions for the raw
    shape) into the unified position structure.
    """
    marketId = self.safe_string(position, 'symbol')
    market = self.safe_market(marketId, market)
    # prefer the buy-side open-order notional; fall back to the sell side
    notional = self.safe_string(position, 'buyOpenOrderNotional')
    if Precise.string_eq(notional, '0'):
        notional = self.safe_string(position, 'sellOpenOrderNotional')
    marginMode = self.safe_string(position, 'marginType')
    collateral = None
    if marginMode == 'isolated':
        # only isolated positions report their own margin as collateral
        collateral = self.safe_string(position, 'isolatedMargin')
    return self.safe_position({
        'info': position,
        'id': None,
        'symbol': market['symbol'],
        'notional': self.parse_number(notional),
        'marginMode': marginMode,
        'liquidationPrice': None,
        'entryPrice': self.safe_number(position, 'avgOpenPrice'),
        'unrealizedPnl': self.safe_number(position, 'unrealizedPnl'),
        'percentage': None,
        'contracts': self.safe_number(position, 'position'),
        'contractSize': self.safe_number(market, 'contractSize'),
        'markPrice': self.safe_number(position, 'markPrice'),
        'lastPrice': None,
        'side': self.safe_string_lower(position, 'side'),
        'hedged': None,
        'timestamp': None,
        'datetime': None,
        'lastUpdateTimestamp': None,
        'maintenanceMargin': None,
        'maintenanceMarginPercentage': None,
        'collateral': collateral,
        'initialMargin': None,
        'initialMarginPercentage': None,
        'leverage': self.safe_integer(position, 'leverage'),
        'marginRatio': None,
        'stopLossPrice': self.safe_number(position, 'stopLossPrice'),
        'takeProfitPrice': self.safe_number(position, 'takeProfitPrice'),
    })
def parse_funding_rate(self, contract, market=None):
    """
    Parse one futures pricing-data entry into the unified funding rate structure.

    Raw example: {"time": 1640061364830, "symbol": "EOS-PERP",
    "markPrice": "3.35", "indexPrice": "3.35", "openInterest": "14242",
    "fundingRate": "-0.000073026", "nextFundingTime": 1640073600000}
    """
    marketId = self.safe_string(contract, 'symbol')
    symbol = self.safe_symbol(marketId, market)
    currentTime = self.safe_integer(contract, 'time')
    # NOTE(review): despite the 'next*' local names, these values populate
    # the current 'fundingRate'/'fundingTimestamp' fields below, while the
    # unified 'nextFunding*' fields are left as None
    nextFundingRate = self.safe_number(contract, 'fundingRate')
    nextFundingRateTimestamp = self.safe_integer(contract, 'nextFundingTime')
    return {
        'info': contract,
        'symbol': symbol,
        'markPrice': self.safe_number(contract, 'markPrice'),
        'indexPrice': self.safe_number(contract, 'indexPrice'),
        'interestRate': self.parse_number('0'),
        'estimatedSettlePrice': None,
        'timestamp': currentTime,
        'datetime': self.iso8601(currentTime),
        'previousFundingRate': None,
        'nextFundingRate': None,
        'previousFundingTimestamp': None,
        'nextFundingTimestamp': None,
        'previousFundingDatetime': None,
        'nextFundingDatetime': None,
        'fundingRate': nextFundingRate,
        'fundingTimestamp': nextFundingRateTimestamp,
        'fundingDatetime': self.iso8601(nextFundingRateTimestamp),
    }
def fetch_funding_rates(self, symbols: Optional[List[str]] = None, params={}):
    """
    fetch the funding rate for multiple markets
    :param str[]|None symbols: list of unified market symbols
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a dictionary of `funding rates structures <https://github.com/ccxt/ccxt/wiki/Manual#funding-rates-structure>`, indexed by market symbols
    """
    self.load_markets()
    symbols = self.market_symbols(symbols)
    response = self.v2PublicGetFuturesPricingData(params)
    #
    # response: {"code": 0, "data": {"contracts": [{"time", "symbol",
    #     "markPrice", "indexPrice", "openInterest", "fundingRate",
    #     "nextFundingTime"}], "collaterals": [{"asset", "referencePrice"}]}}
    #
    data = self.safe_value(response, 'data', {})
    contracts = self.safe_value(data, 'contracts', [])
    result = self.parse_funding_rates(contracts)
    return self.filter_by_array(result, 'symbol', symbols)
def modify_margin_helper(self, symbol: str, amount, type, params={}):
    """
    Shared implementation behind add_margin/reduce_margin.

    :param str symbol: unified symbol of an isolated-margin futures position
    :param float amount: margin delta; sent as-is(negative values reduce margin)
    :param str type: 'add' or 'reduce', echoed into the returned structure
    :param dict [params]: extra parameters for the endpoint
    :returns dict: a margin modification structure
    """
    self.load_markets()
    self.load_accounts()
    market = self.market(symbol)
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    amount = self.amount_to_precision(symbol, amount)
    request = {
        'account-group': accountGroup,
        'symbol': market['id'],
        'amount': amount,  # positive value for adding margin, negative for reducing
    }
    response = self.v2PrivateAccountGroupPostFuturesIsolatedPositionMargin(self.extend(request, params))
    #
    # Can only change margin for perpetual futures isolated margin positions
    #
    # response: {"code": 0}
    #
    if type == 'reduce':
        # report reductions as a positive amount in the unified structure
        amount = Precise.string_abs(amount)
    return self.extend(self.parse_margin_modification(response, market), {
        'amount': self.parse_number(amount),
        'type': type,
    })
def parse_margin_modification(self, data, market=None):
    # The endpoint only answers with {"code": 0}; a zero code means success.
    code = self.safe_string(data, 'code')
    if code == '0':
        status = 'ok'
    else:
        status = 'failed'
    return {
        'info': data,
        'type': None,
        'amount': None,
        'code': market['quote'],
        'symbol': market['symbol'],
        'status': status,
    }
def reduce_margin(self, symbol: str, amount, params={}):
    """
    remove margin from a position
    :param str symbol: unified market symbol
    :param float amount: the amount of margin to remove
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#reduce-margin-structure>`
    """
    # thin wrapper over the shared helper, tagged as a reduction
    result = self.modify_margin_helper(symbol, amount, 'reduce', params)
    return result
def add_margin(self, symbol: str, amount, params={}):
    """
    add margin
    :param str symbol: unified market symbol
    :param float amount: amount of margin to add
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#add-margin-structure>`
    """
    # thin wrapper over the shared helper, tagged as an addition
    result = self.modify_margin_helper(symbol, amount, 'add', params)
    return result
def set_leverage(self, leverage, symbol: Optional[str] = None, params={}):
    """
    set the level of leverage for a market
    :param float leverage: the rate of leverage
    :param str symbol: unified market symbol
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: response from the exchange
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
    if (leverage < 1) or (leverage > 100):
        # the endpoint only accepts leverage in the [1, 100] range
        raise BadRequest(self.id + ' leverage should be between 1 and 100')
    self.load_markets()
    self.load_accounts()
    market = self.market(symbol)
    if market['type'] != 'future':
        raise BadSymbol(self.id + ' setLeverage() supports futures contracts only')
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    request = {
        'account-group': accountGroup,
        'symbol': market['id'],
        'leverage': leverage,
    }
    return self.v2PrivateAccountGroupPostFuturesLeverage(self.extend(request, params))
def set_margin_mode(self, marginMode, symbol: Optional[str] = None, params={}):
    """
    set margin mode to 'cross' or 'isolated'
    :param str marginMode: 'cross' or 'isolated'
    :param str symbol: unified market symbol
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: response from the exchange
    """
    marginMode = marginMode.lower()
    if marginMode == 'cross':
        # the exchange spells cross margin 'crossed'
        marginMode = 'crossed'
    if marginMode != 'isolated' and marginMode != 'crossed':
        raise BadRequest(self.id + ' setMarginMode() marginMode argument should be isolated or cross')
    self.load_markets()
    self.load_accounts()
    market = self.market(symbol)
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    request = {
        'account-group': accountGroup,
        'symbol': market['id'],
        'marginMode': marginMode,
    }
    if market['type'] != 'future':
        raise BadSymbol(self.id + ' setMarginMode() supports futures contracts only')
    return self.v2PrivateAccountGroupPostFuturesMarginType(self.extend(request, params))
def fetch_leverage_tiers(self, symbols: Optional[List[str]] = None, params={}):
    """
    retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes
    :param str[]|None symbols: list of unified market symbols
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a dictionary of `leverage tiers structures <https://github.com/ccxt/ccxt/wiki/Manual#leverage-tiers-structure>`, indexed by market symbols
    """
    self.load_markets()
    response = self.v2PublicGetFuturesContract(params)
    #
    # response: {"code": 0, "data": [{"symbol": "BTC-PERP", "status",
    #     "displayName", "settlementAsset", "underlying", "tradingStartTime",
    #     "priceFilter", "lotSizeFilter", "commissionType",
    #     "commissionReserveRate", "marketOrderPriceMarkup",
    #     "marginRequirements": [{"positionNotionalLowerBound",
    #         "positionNotionalUpperBound", "initialMarginRate",
    #         "maintenanceMarginRate"}, ...]}]}
    #
    data = self.safe_value(response, 'data')
    symbols = self.market_symbols(symbols)
    return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market=None):
    """
    :param dict info: Exchange market response for 1 market(see
        fetch_leverage_tiers for the raw shape, notably 'marginRequirements')
    :param dict market: CCXT market
    """
    marginRequirements = self.safe_value(info, 'marginRequirements', [])
    id = self.safe_string(info, 'symbol')
    market = self.safe_market(id, market)
    tiers = []
    for i in range(0, len(marginRequirements)):
        tier = marginRequirements[i]
        initialMarginRate = self.safe_string(tier, 'initialMarginRate')
        tiers.append({
            'tier': self.sum(i, 1),
            'currency': market['quote'],
            'minNotional': self.safe_number(tier, 'positionNotionalLowerBound'),
            'maxNotional': self.safe_number(tier, 'positionNotionalUpperBound'),
            'maintenanceMarginRate': self.safe_number(tier, 'maintenanceMarginRate'),
            # the max leverage of a tier is the inverse of its initial margin rate
            'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)),
            'info': tier,
        })
    return tiers
def parse_deposit_withdraw_fee(self, fee, currency=None):
    """
    Parse one asset entry(with its per-chain 'blockChain' list) into the
    unified deposit/withdraw fee structure.

    Raw example: {"assetCode": "USDT", "assetName": "Tether",
    "precisionScale": 9, "nativeScale": 4, "blockChain": [{"chainName": "Omni",
    "withdrawFee": "30.0", "allowDeposit": True, "allowWithdraw": True,
    "minDepositAmt": "0.0", "minWithdrawal": "50.0", "numConfirmations": 3}]}
    """
    blockChains = self.safe_value(fee, 'blockChain', [])
    blockChainsLength = len(blockChains)
    result = {
        'info': fee,
        'withdraw': {
            'fee': None,
            'percentage': None,
        },
        'deposit': {
            'fee': None,
            'percentage': None,
        },
        'networks': {},
    }
    for i in range(0, blockChainsLength):
        blockChain = blockChains[i]
        networkId = self.safe_string(blockChain, 'chainName')
        currencyCode = self.safe_string(currency, 'code')
        networkCode = self.network_id_to_code(networkId, currencyCode)
        result['networks'][networkCode] = {
            'deposit': {'fee': None, 'percentage': None},
            'withdraw': {'fee': self.safe_number(blockChain, 'withdrawFee'), 'percentage': False},
        }
        # with a single chain the top-level withdraw fee is unambiguous
        if blockChainsLength == 1:
            result['withdraw']['fee'] = self.safe_number(blockChain, 'withdrawFee')
            result['withdraw']['percentage'] = False
    return result
def fetch_deposit_withdraw_fees(self, codes: Optional[List[str]] = None, params={}):
    """
    fetch deposit and withdraw fees
    see https://ascendex.github.io/ascendex-pro-api/#list-all-assets
    :param str[]|None codes: list of unified currency codes
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a list of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>`
    """
    self.load_markets()
    # the public asset list carries the per-chain deposit/withdraw fee data
    response = self.v2PublicGetAssets(params)
    assets = self.safe_value(response, 'data')
    return self.parse_deposit_withdraw_fees(assets, codes, 'assetCode')
def transfer(self, code: str, amount, fromAccount, toAccount, params={}):
    """
    transfer currency internally between wallets on the same account
    :param str code: unified currency code
    :param float amount: amount to transfer
    :param str fromAccount: account to transfer from
    :param str toAccount: account to transfer to
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a `transfer structure <https://github.com/ccxt/ccxt/wiki/Manual#transfer-structure>`
    """
    self.load_markets()
    self.load_accounts()
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    currency = self.currency(code)
    amount = self.currency_to_precision(code, amount)
    # translate unified account names into exchange account ids
    accountsByType = self.safe_value(self.options, 'accountsByType', {})
    fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
    toId = self.safe_string(accountsByType, toAccount, toAccount)
    # one side of the transfer must always be the spot('cash') wallet
    if fromId != 'cash' and toId != 'cash':
        raise ExchangeError(self.id + ' transfer() only supports direct balance transfer between spot and future, spot and margin')
    request = {
        'account-group': accountGroup,
        'amount': amount,
        'asset': currency['id'],
        'fromAccount': fromId,
        'toAccount': toId,
    }
    response = self.v1PrivateAccountGroupPostTransfer(self.extend(request, params))
    #
    # response: {code: '0'}
    #
    transferOptions = self.safe_value(self.options, 'transfer', {})
    fillResponseFromRequest = self.safe_value(transferOptions, 'fillResponseFromRequest', True)
    transfer = self.parse_transfer(response, currency)
    if fillResponseFromRequest:
        # the endpoint echoes nothing back, so fill in from the request
        transfer['fromAccount'] = fromAccount
        transfer['toAccount'] = toAccount
        transfer['amount'] = amount
        transfer['currency'] = code
    return transfer
def parse_transfer(self, transfer, currency=None):
    # The transfer endpoint only returns {code: '0'}, so most unified
    # fields cannot be filled in here; the caller may patch them in from
    # the original request(see the 'fillResponseFromRequest' option).
    code = self.safe_integer(transfer, 'code')
    currencyCode = self.safe_currency_code(None, currency)
    now = self.milliseconds()
    return {
        'info': transfer,
        'id': None,
        'timestamp': now,
        'datetime': self.iso8601(now),
        'currency': currencyCode,
        'amount': None,
        'fromAccount': None,
        'toAccount': None,
        'status': self.parse_transfer_status(code),
    }
def parse_transfer_status(self, status):
    # a zero code from the transfer endpoint indicates success
    return 'ok' if status == 0 else 'failed'
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """
    Build the URL, headers and body for a request.

    ``api`` is a triple like [version, access, type], e.g.
    ['v1', 'private', 'accountCategory'].
    """
    version = api[0]
    access = api[1]
    type = self.safe_string(api, 2)
    url = ''
    accountCategory = (type == 'accountCategory')
    if accountCategory or (type == 'accountGroup'):
        # these endpoints are rooted under the caller's account-group id
        url += self.implode_params('/{account-group}', params)
        params = self.omit(params, 'account-group')
    request = self.implode_params(path, params)
    url += '/api/pro/'
    if version == 'v2':
        if type == 'data':
            request = 'data/' + version + '/' + request
        else:
            request = version + '/' + request
    else:
        url += version + '/'
        if accountCategory:
            url += self.implode_params('{account-category}/', params)
    params = self.omit(params, 'account-category')
    url += request
    # normalize the path that is signed below(not the URL itself)
    # NOTE(review): 'and' binds tighter than 'or', so this condition also
    # matches 'margin/balance' on non-v1 versions - presumably harmless
    # because that route only exists under v1, but worth confirming
    if (version == 'v1') and (request == 'cash/balance') or (request == 'margin/balance'):
        request = 'balance'
    if (version == 'v1') and (request == 'spot/fee'):
        request = 'fee'
    if request.find('subuser') >= 0:
        parts = request.split('/')
        request = parts[2]
    params = self.omit(params, self.extract_params(path))
    if access == 'public':
        if params:
            url += '?' + self.urlencode(params)
    else:
        self.check_required_credentials()
        # signature: base64(HMAC-SHA256("<timestamp>+<request-path>", secret))
        timestamp = str(self.milliseconds())
        payload = timestamp + '+' + request
        hmac = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
        headers = {
            'x-auth-key': self.apiKey,
            'x-auth-timestamp': timestamp,
            'x-auth-signature': hmac,
        }
        if method == 'GET':
            if params:
                url += '?' + self.urlencode(params)
        else:
            headers['Content-Type'] = 'application/json'
            body = self.json(params)
    url = self.urls['api']['rest'] + url
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """
    Inspect a decoded response and raise a mapped ccxt exception when the
    exchange reports an error; otherwise defer to the default handler.
    """
    if response is None:
        return None  # fallback to default error handler
    #
    # error examples:
    # {'code': 6010, 'message': 'Not enough balance.'}
    # {'code': 60060, 'message': 'The order is already filled or canceled.'}
    # {"code":2100,"message":"ApiKeyFailure"}
    # {"code":300001,"message":"Price is too low from market price.","reason":"INVALID_PRICE",...}
    #
    code = self.safe_string(response, 'code')
    message = self.safe_string(response, 'message')
    # a non-zero 'code' signals an error even when 'message' is absent
    error = (code is not None) and (code != '0')
    if error or (message is not None):
        feedback = self.id + ' ' + body
        # try the numeric code first, then the exact message, then substrings
        self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
        self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
        self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
        raise ExchangeError(feedback)  # unknown message
    return None
| [
"[email protected]"
] | |
bf73bd5eda0d1303716e539c0d40f57d6ab13de8 | 22fe6ed51715486ebbc09e404504ed4d7a28c37d | /python-katas/57_CountHi.py | 6ef69b2da8a4251f4d619f0a62ab8c3d5042d32a | [] | no_license | Jethet/Practice-more | 1dd3ff19dcb3342a543ea1553a1a6fb0264b9c38 | 8488a679730e3406329ef30b4f438d41dd3167d6 | refs/heads/master | 2023-01-28T14:51:39.283741 | 2023-01-06T10:14:41 | 2023-01-06T10:14:41 | 160,946,017 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Return the number of times the string 'hi' appears in a given string.
def count_hi(s):
    """Return the number of times the substring 'hi' appears in s."""
    # parameter renamed: the original shadowed the builtin 'str'
    return s.count('hi')
# CodingBat solution:
def count_hi(s):
    """Return the number of times 'hi' appears in s(manual scan version)."""
    # avoid shadowing the builtins 'str' and 'sum' like the original did
    total = 0
    for i in range(len(s) - 1):
        if s[i:i + 2] == 'hi':
            total += 1
    return total
# quick manual checks against the CodingBat examples
print(count_hi('abc hi ho'))  # expected 1
print(count_hi('ABChi hi'))  # expected 2
print(count_hi('hihi'))  # expected 2
| [
"[email protected]"
] | |
13ab0721b3a33f3abbaaf46d0378e8b4649ba27f | d1f15554df2d5c0f74ddbcba6e870359841f682b | /wagtail/migrations/0057_page_locale_fields_notnull.py | 8f18589b5c9f794cba254c26312dd2d73645c5f1 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] | permissive | wagtail/wagtail | bd405f89b86e0c625fef0685fd6bfba41cf5cbfc | 06a7bc6124bf62675c09fbe0a4ed9bbac183e025 | refs/heads/main | 2023-09-04T06:22:51.601208 | 2023-09-01T15:22:00 | 2023-09-01T15:22:00 | 16,479,108 | 12,974 | 3,580 | BSD-3-Clause | 2023-09-14T10:45:04 | 2014-02-03T12:41:59 | Python | UTF-8 | Python | false | false | 793 | py | # Generated by Django 2.2.10 on 2020-07-13 10:17
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Alter ``Page.locale`` and ``Page.translation_key``.

    Auto-generated(Django 2.2.10). Runs after the data migration 0056 that
    populated both fields; per the migration name this tightens them so they
    can no longer be null - confirm against migrations 0055/0056.
    """

    dependencies = [
        ("wagtailcore", "0056_page_locale_fields_populate"),
    ]

    operations = [
        migrations.AlterField(
            model_name="page",
            name="locale",
            field=models.ForeignKey(
                editable=False,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="+",
                to="wagtailcore.Locale",
            ),
        ),
        migrations.AlterField(
            model_name="page",
            name="translation_key",
            field=models.UUIDField(default=uuid.uuid4, editable=False),
        ),
    ]
| [
"[email protected]"
] | |
83852e477286aff2176a0246871748edca6bcef8 | c733e6b433914a8faba256c7853f5cf2cd39c62a | /Python/Leetcode Daily Practice/Heap/692. Top K Frequent Words.py | db9a25d3ab733cd3cdd4dd640983c8602e54fffe | [] | no_license | YaqianQi/Algorithm-and-Data-Structure | 3016bebcc1f1356b6e5f3c3e588f3d46c276a805 | 2e1751263f484709102f7f2caf18776a004c8230 | refs/heads/master | 2021-10-27T16:29:18.409235 | 2021-10-14T13:57:36 | 2021-10-14T13:57:36 | 178,946,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | class Solution(object):
def topKFrequent(self, words, k):
from collections import Counter
import heapq
cnt = Counter(words) # o(n)
h = [(-freq, key) for key, freq in cnt.items()] # o(n)
return [heapq.heappop(h)[1] for i in range(k)] # o (k * logn)
print(Solution().topKFrequent(words=["i", "love", "leetcode", "i", "love", "coding"], k = 2)) | [
"[email protected]"
] | |
55b52764902ce153ec4c19dc6da9439dee543669 | 9a0eb3e292d57b59198c7c66a994372ced9cfa5b | /nodes/1.x/python/String.ReplaceIllegalFilenameCharacters.py | a922b676f1485306810fd884001c9016638051ed | [
"MIT"
] | permissive | andydandy74/ClockworkForDynamo | 544ddf0893f5c0072fca7934f4e128001771f767 | 528400c667c4c3f2b51814af84e85c8fab8a8059 | refs/heads/master | 2023-08-19T03:07:33.489926 | 2023-08-13T04:31:17 | 2023-08-13T04:31:17 | 15,043,988 | 184 | 100 | MIT | 2023-09-04T18:47:40 | 2013-12-09T10:11:01 | Python | UTF-8 | Python | false | false | 430 | py | strings = IN[0]
replace = IN[1]
# characters that are illegal in file names; order matches the original
# chain of replacements so behavior is unchanged even when 'replace'
# itself contains one of these characters
ILLEGAL_CHARS = '/?<>\\:*|"^'
strlist = []
for name in strings:
    # renamed loop variable: the original shadowed the builtin 'str';
    # a single loop replaces the ten copy-pasted .replace() calls
    for ch in ILLEGAL_CHARS:
        name = name.replace(ch, replace)
    strlist.append(name)
OUT = strlist | [
"[email protected]"
] | |
bdbe6ea6340e8f88e7e25d5b64882ed8bd313b28 | 488825f206180a276a4dbf61ed85227d6eb791cf | /src/config/asgi.py | 0b57e107471310680a0c77fa63612bdaeef0f579 | [
"Unlicense"
] | permissive | MTES-MCT/trackdechets-cockpit | 53acf96e79bcdb2a834f2c28114bbb1866a766e6 | 3624caf22882bd499dc7b22900e297adc6ca62d3 | refs/heads/main | 2023-08-04T10:15:17.613615 | 2022-01-03T22:16:07 | 2022-01-03T22:16:07 | 441,255,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for cockpit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the settings module; setdefault keeps any value already
# present in the environment (e.g. set by the deployment platform).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cockpit.settings")
# Module-level ASGI callable that servers (uvicorn/daphne) import and serve.
application = get_asgi_application()
| [
"[email protected]"
] | |
48b15be505f68c01bcbe37105ce08e8d80a90959 | 93b704572dd4f36ae488f931fbe8372a215b13ad | /clean_solutions/day3.py | d56d9cf4949958790cfde7211c768208ff456079 | [] | no_license | Goldenlion5648/AdventOfCode2020Live | 7cfdf6804402fdf42d10c70742579522c487f501 | e3f5908e8747991b50bdde339ad9ecba527b1168 | refs/heads/master | 2023-04-04T12:48:21.318124 | 2021-04-08T16:42:13 | 2021-04-08T16:42:13 | 317,414,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py |
'''
Right 1, down 1.
Right 3, down 1. (This is the slope you already checked.)
Right 5, down 1.
Right 7, down 1.
Right 1, down 2.
'''
from collections import *
# Read the puzzle grid: one string per row; '#' marks a tree, '.' is open.
with open("input3.txt") as f:
    # a = list(map(int,f.read().strip().split("\n")))
    board = f.read().strip().split("\n")
def slide(xChange, yChange, grid=None):
    """Count trees ('#') hit while tobogganing down the grid.

    Starting at the top-left, repeatedly move right by ``xChange`` (wrapping
    around, since the pattern repeats horizontally) and down by ``yChange``
    until the bottom is passed.

    BUG FIX: the original ignored both parameters and always moved (3, 1),
    so part 2 computed the same count for every slope.

    Args:
        xChange: columns moved right per step.
        yChange: rows moved down per step.
        grid: optional list of row strings; defaults to the module-level
            ``board`` read from input3.txt (backward compatible).

    Returns:
        Number of '#' cells visited.
    """
    if grid is None:
        grid = board
    posX = 0
    posY = 0
    count = 0
    width = len(grid[0])
    while posY < len(grid):
        if grid[posY][posX] == "#":
            count += 1
        posX = (posX + xChange) % width  # wrap: pattern repeats to the right
        posY += yChange
    return count
print("part 1", slide(3, 1))
# part 2: multiply the tree counts across all five candidate slopes
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
tree_counts = [slide(dx, dy) for dx, dy in slopes]
product = 1
for hits in tree_counts:
    product *= hits
print("part 2", product)
"[email protected]"
] | |
eabc327817af3553828fe0ffc5f9a44d5e5d1951 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/mushroomPicker_20200729130815.py | f6b760f3a1112376d639f98641c3cd38b7ba4176 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | '''
You are given a non-empty, zero-indexed array A of n (1 ≤ n ≤ 100 000) integers
a0, a1, . . . , an−1 (0 ≤ ai ≤ 1 000). This array represents number of mushrooms growing on the
consecutive spots along a road. You are also given integers k and m (0 ≤ k, m < n).
A mushroom picker is at spot number k on the road and should perform m moves. In
one move she moves to an adjacent spot. She collects all the mushrooms growing on spots
she visits. The goal is to calculate the maximum number of mushrooms that the mushroom
picker can collect in m moves.
For example, consider array A such that:
'''
def count_totals(p, x, y):
    """Return the sum of A[x..y] inclusive from prefix sums p.

    ``p`` is the prefix array built as p[i] = A[0] + ... + A[i]
    (the form constructed in ``mushroom`` below).

    BUG FIX: the original returned ``p[y+1]``, which ignored ``x`` entirely
    and indexed one past the end of the array for y == len(A) - 1.
    """
    if x == 0:
        return p[y]
    return p[y] - p[x - 1]
def mushroom(A, k, m):
    """Return the maximum mushrooms a picker at spot k can collect in m moves.

    Completes the unfinished draft: the original computed candidate interval
    endpoints and printed them for debugging, but never accumulated a total
    or returned anything.

    Strategy (classic prefix-sum sweep): for every split p, try
    "walk left p spots first, then spend the remaining moves walking right"
    and the mirror image, taking the best interval sum seen.

    Args:
        A: mushrooms growing on each consecutive spot (non-empty).
        k: the picker's starting index, 0 <= k < len(A).
        m: number of moves allowed, m >= 0.
    """
    n = len(A)
    # pref[i] = A[0] + ... + A[i]
    pref = [0] * n
    pref[0] = A[0]
    for i in range(1, n):
        pref[i] = pref[i - 1] + A[i]

    def _range_sum(lo, hi):
        # sum of A[lo..hi] inclusive
        return pref[hi] - (pref[lo - 1] if lo > 0 else 0)

    best = 0
    # Go left p steps first, then turn around and head right with what's left.
    for p in range(min(m, k) + 1):
        left = k - p
        right = min(n - 1, max(k, k + m - 2 * p))
        best = max(best, _range_sum(left, right))
    # Mirror: go right p steps first, then turn around and head left.
    for p in range(min(m + 1, n - k)):
        right = k + p
        left = max(0, min(k, k - (m - 2 * p)))
        best = max(best, _range_sum(left, right))
    return best
print(mushroom([2, 3, 7, 5, 1, 3, 9], 4, 6))
"[email protected]"
] | |
ad5d5361d58d186ea6682f1b01c9158b0e151206 | 1255b4c76aa2def0d8ca07ff75ef264383de36e3 | /main.py | 8b482da2a9acf7567e56eec60c0e4c881703abac | [] | no_license | thepixelboy/flask-auth | bcbe2ce182e54743acfa70860f975b059952c65c | e49903b65c9451891b61138e1b5453ea29f733d1 | refs/heads/main | 2023-07-10T00:41:46.442728 | 2021-08-23T16:56:39 | 2021-08-23T16:56:39 | 399,182,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | from os import name
from flask import Flask, render_template, request, url_for, redirect, flash, send_from_directory
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; move to an
# environment variable before deploying.
app.config["SECRET_KEY"] = "flown-actinium-cam-algae"
# Local SQLite database file (users.db next to the app).
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"
# Disable SQLAlchemy's event-tracking overhead (and its startup warning).
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
# Wire Flask-Login into the app (enables @login_required and current_user).
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: map the session-stored id back to a User row."""
    uid = int(user_id)
    return User.query.get(uid)
##CREATE TABLE IN DB
class User(UserMixin, db.Model):
    """ORM model for a registered user; UserMixin supplies Flask-Login hooks."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    email = db.Column(db.String(100), unique=True)  # login identifier, unique
    password = db.Column(db.String(100))  # pbkdf2:sha256 hash (see register), never plaintext
    name = db.Column(db.String(1000))  # display name shown on /secrets
# Line below only required once, when creating DB.
# db.create_all()
@app.route("/")
def home():
    """Public landing page."""
    is_authed = current_user.is_authenticated
    return render_template("index.html", logged_in=is_authed)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create an account, log the new user in, and redirect to /secrets."""
    if request.method != "POST":
        # Plain GET: just render the sign-up form.
        return render_template("register.html", logged_in=current_user.is_authenticated)

    email = request.form.get("email")
    if User.query.filter_by(email=email).first():
        # Duplicate account: send the visitor to the login page instead.
        flash("You've already signed up with that email, log-in instead.")
        return redirect(url_for("login"))

    hashed = generate_password_hash(
        request.form.get("password"), method="pbkdf2:sha256", salt_length=8
    )
    new_user = User(email=email, name=request.form.get("name"), password=hashed)
    db.session.add(new_user)
    db.session.commit()

    # Authenticate the freshly created account right away.
    login_user(new_user)
    return redirect(url_for("secrets"))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Validate submitted credentials and start a session."""
    if request.method == "POST":
        email = request.form.get("email")
        password = request.form.get("password")
        user = User.query.filter_by(email=email).first()
        # NOTE(review): the two distinct flash messages below reveal whether an
        # email is registered (user enumeration); consider one generic message.
        if not user:
            flash("That email does not exist, please try again.")
            return redirect(url_for("login"))
        if not check_password_hash(user.password, password):
            flash("Password incorrect, please try again.")
            return redirect(url_for("login"))
        # Email exists and the password hash matches.
        login_user(user)
        return redirect(url_for("secrets"))
    return render_template("login.html", logged_in=current_user.is_authenticated)
@app.route("/secrets")
@login_required
def secrets():
    """Members-only page; greets the logged-in user by name."""
    context = {"name": current_user.name, "logged_in": True}
    return render_template("secrets.html", **context)
@app.route("/logout")
def logout():
    """End the current session, then bounce back to the landing page."""
    logout_user()
    return redirect(url_for("home"))
@app.route("/download")
@login_required
def download():
    """Serve the cheat-sheet PDF as a file download (login required)."""
    pdf_path = "files/cheat_sheet.pdf"
    return send_from_directory("static", path=pdf_path, as_attachment=True)
# Run the development server only when executed directly, not when imported.
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # never leave it on in production.
    app.run(debug=True)
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.