blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
615bcc7e7e8afc56a8dc513da89e9d4f4faab88d
|
83f78318d1a85045b0e29f3fed10e8ba3e5c107c
|
/throwback/root.py
|
a5a39de95633bd252ed2a43ca57c8e352c04ff32
|
[] |
no_license
|
kadrlica/throwback
|
c396d00230ec0e6ed4ce8c31ac6cd12e2ee76690
|
c628acb9716aad433c49de4e2f71c54d2a0bc83e
|
refs/heads/master
| 2020-03-24T09:33:03.127648 | 2018-08-02T15:04:00 | 2018-08-02T15:04:00 | 142,631,826 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,349 |
py
|
#!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "Alex Drlica-Wagner"
import matplotlib
from collections import OrderedDict as odict
# A modern version (v6-14-02) of the Palette from TColor...
# https://github.com/root-project/root/blob/2762a32343f57664b42558cd3af4031fe2f4f086/core/base/src/TColor.cxx#L2404-L2408
# 50-entry palette of ROOT color-table *indices* (not RGB values).
# A modern version (v6-14-02) of the Palette from TColor...
# https://github.com/root-project/root/blob/2762a32343f57664b42558cd3af4031fe2f4f086/core/base/src/TColor.cxx#L2404-L2408
PALETTE = [19,18,17,16,15,14,13,12,11,20,
           21,22,23,24,25,26,27,28,29,30, 8,
           31,32,33,34,35,36,37,38,39,40, 9,
           41,42,43,44,45,47,48,49,46,50, 2,
           7, 6, 5, 4, 3, 2,1]
           #7, 6, 5, 4, 3, 112,1] # original with typo
           #7, 6, 5, 4, 3, 5,1] # corrected to match
# Going back in time to 2007 (v5-17-06), here was the origin palette
# Note the typo in entry 48: 112 (pink) not 2 (red)
# https://github.com/root-project/root/blob/9294cc60a9a70dece4f24f0bc0399cc00c0f78b5/base/src/TStyle.cxx#L1445-L1449
# The commit of the fix:
# https://github.com/root-project/root/commit/d3e92e5de7e76c1ded2af7218adc9bc20b7f0c9f
PALETTE07 = list(PALETTE)
# Second-to-last entry differed in 2007; 112 would index past TCOLORS,
# so 5 (yellow slot in the basic table) stands in for the old behavior.
PALETTE07[-2] = 5 # typo was 112, but end up being magenta
# These are the basic root colors.
# https://github.com/root-project/root/blob/2762a32343f57664b42558cd3af4031fe2f4f086/core/base/src/TColor.cxx#L1077
# This list was generated with:
# for (int i=1; i<51; i++) {gROOT->GetColor()->Print; }
# The 51 basic ROOT colors as (r, g, b) floats in [0, 1]; index 0 is the
# canvas background and indices 1-50 match gROOT's color table.
TCOLORS = [
    (1.000000, 1.000000, 1.000000), # Name=background
    (0.000000, 0.000000, 0.000000), # Name=black
    (1.000000, 0.000000, 0.000000), # Name=red
    (0.000000, 1.000000, 0.000000), # Name=green
    (0.000000, 0.000000, 1.000000), # Name=blue
    (1.000000, 0.000000, 1.000000), # Name=magenta
    (0.000000, 1.000000, 0.800000), # Name=teal
    (1.000000, 0.800000, 0.000000), # Name=orange
    (0.350000, 0.830000, 0.330000), # Name=Color8
    (0.350000, 0.330000, 0.850000), # Name=Color9
    (0.999000, 0.999000, 0.999000), # Name=white
    (0.754000, 0.715000, 0.676000), # Name=editcol
    (0.300000, 0.300000, 0.300000), # Name=grey12
    (0.400000, 0.400000, 0.400000), # Name=grey13
    (0.500000, 0.500000, 0.500000), # Name=grey14
    (0.600000, 0.600000, 0.600000), # Name=grey15
    (0.700000, 0.700000, 0.700000), # Name=grey16
    (0.800000, 0.800000, 0.800000), # Name=grey17
    (0.900000, 0.900000, 0.900000), # Name=grey18
    (0.950000, 0.950000, 0.950000), # Name=grey19
    (0.800000, 0.780000, 0.670000), # Name=Color20
    (0.800000, 0.780000, 0.670000), # Name=Color21
    (0.760000, 0.750000, 0.660000), # Name=Color22
    (0.730000, 0.710000, 0.640000), # Name=Color23
    (0.700000, 0.650000, 0.590000), # Name=Color24
    (0.720000, 0.640000, 0.610000), # Name=Color25
    (0.680000, 0.600000, 0.550000), # Name=Color26
    (0.610000, 0.560000, 0.510000), # Name=Color27
    (0.530000, 0.400000, 0.340000), # Name=Color28
    (0.690000, 0.810000, 0.780000), # Name=Color29
    (0.520000, 0.760000, 0.640000), # Name=Color30
    (0.540000, 0.660000, 0.630000), # Name=Color31
    (0.510000, 0.620000, 0.550000), # Name=Color32
    (0.680000, 0.740000, 0.780000), # Name=Color33
    (0.480000, 0.560000, 0.600000), # Name=Color34
    (0.460000, 0.540000, 0.570000), # Name=Color35
    (0.410000, 0.510000, 0.590000), # Name=Color36
    (0.430000, 0.480000, 0.520000), # Name=Color37
    (0.490000, 0.600000, 0.820000), # Name=Color38
    (0.500000, 0.500000, 0.610000), # Name=Color39
    (0.670000, 0.650000, 0.750000), # Name=Color40
    (0.830000, 0.810000, 0.530000), # Name=Color41
    (0.870000, 0.730000, 0.530000), # Name=Color42
    (0.740000, 0.620000, 0.510000), # Name=Color43
    (0.780000, 0.600000, 0.490000), # Name=Color44
    (0.750000, 0.510000, 0.470000), # Name=Color45
    (0.810000, 0.370000, 0.380000), # Name=Color46
    (0.670000, 0.560000, 0.580000), # Name=Color47
    (0.650000, 0.470000, 0.480000), # Name=Color48
    (0.580000, 0.410000, 0.440000), # Name=Color49
    (0.830000, 0.350000, 0.330000), # Name=Color50
]
# Matplotlib colormap equivalent to the modern ROOT palette; out-of-range
# values clamp to the palette ends via set_over/set_under.
root_cmap = matplotlib.colors.ListedColormap([TCOLORS[i] for i in PALETTE])
root_cmap.set_over(TCOLORS[PALETTE[-1]]);
root_cmap.set_under(TCOLORS[PALETTE[0]])
# Colormap for the 2007-era palette (typo entry corrected to 5 above).
root07_cmap = matplotlib.colors.ListedColormap([TCOLORS[i] for i in PALETTE07])
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    args = parser.parse_args()

    import numpy as np
    import pylab as plt
    from mpl_toolkits.mplot3d import Axes3D

    def fn(x,y):
        # Same demo surface ROOT uses in its palette examples (see the
        # TF2 expression in the string below).
        return 0.1+(1-(x-2)*(x-2))*(1-(y-2)*(y-2))

    xx,yy = np.meshgrid(np.linspace(1,3,1000),np.linspace(1,3,1000))

    # 2D filled-contour demo mirroring the ROOT "colz" draw quoted below.
    plt.figure(figsize=(6,4))
    levels = np.arange(0.1,1.2,0.1)
    plt.contourf(xx,yy,fn(xx,yy),levels,vmin=0.07,vmax=1.05,cmap=root_cmap)
    plt.colorbar(ticks=levels,pad=0.01,aspect=10)
    plt.subplots_adjust(left=0.08,right=0.99)
    """ Equivalent in ROOT:
    TCanvas *c1 = new TCanvas("c1","c1",0,0,600,400);
    TF2 *f1 = new TF2("f1","0.1+(1-(x-2)*(x-2))*(1-(y-2)*(y-2))",1,3,1,3);
    f1->SetNpx(1000);
    f1->SetNpy(1000);
    Double_t levels[] = {0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1};
    f1->SetContour(10,levels);
    gStyle->SetPalette(-1);
    f1->Draw("colz")
    """
    # 3D surface demo mirroring the ROOT "surf1z" draw quoted below.
    fig = plt.figure(figsize=(6,4))
    ax = fig.add_subplot(111, projection='3d')
    im = ax.plot_surface(xx,yy,fn(xx,yy),vmin=0.1,vmax=1.09,cmap=root_cmap)
    plt.colorbar(im,ticks=np.arange(0,1.2,0.1))
    """
    TCanvas *c2 = new TCanvas("c2","c2",0,0,600,400);
    f1->SetContour(20);
    f1->SetNpx(20);
    f1->SetNpy(20)
    f1->Draw("surf1z");
    """
|
[
"[email protected]"
] | |
2bae6e3b66955458f062196496992f4f9b1a1513
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/dn9 - minobot/M-17068-2501.py
|
51567abcbd0bb9da09e40e7d35ee022e3cb5716c
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,142 |
py
|
def premik(ukaz, x, y, smer):
    """Apply one command to the robot state and return (x, y, smer).

    ukaz is either "R"/"L" (turn right/left in place) or an integer
    distance to move along the current heading. Headings follow "NESW";
    y grows southwards, so moving north decreases y.
    """
    compass = "NESW"
    steps = {"N": (0, -1), "E": (1, 0), "S": (0, 1), "W": (-1, 0)}
    heading = compass.index(smer)
    if ukaz == "R":
        return x, y, compass[(heading + 1) % 4]
    if ukaz == "L":
        return x, y, compass[(heading - 1) % 4]
    dx, dy = steps[smer]
    return x + dx * ukaz, y + dy * ukaz, smer
def angleski(ime_datoteke):
    """Read Slovene robot commands from a file and translate them into
    the tokens premik() understands.

    "DESNO" -> "R", "LEVO" -> "L", "NAPREJ <n>" -> "<n>" (the distance
    stays a string; the caller converts it to int).
    """
    # "with" guarantees the handle is closed (the original leaked it).
    # "utf-8" is the canonical spelling of the codec the original named
    # as "utf - 8" (both normalize to the same codec).
    with open(ime_datoteke, encoding="utf-8") as datoteka:
        besede = datoteka.read().split()
    popsez = []
    for i, beseda in enumerate(besede):
        if beseda[0] == "D":
            popsez.append("R")
        if beseda[0] == "L":
            popsez.append(beseda[0])
        if beseda[0] == "N":
            # The distance is the token following "NAPREJ".
            popsez.append(besede[i + 1])
    return popsez
def izvedi(ime_datoteke):
    """Run every command from the file and return the list of all states
    (x, y, smer), starting with the initial state (0, 0, "N")."""
    x, y, smer = 0, 0, "N"
    stanja = [(x, y, smer)]
    for e in angleski(ime_datoteke):
        ukaz = e if e in ("R", "L") else int(e)
        # Compute the next state once and reuse it (the original called
        # premik() twice per command, doing the same work twice).
        x, y, smer = premik(ukaz, x, y, smer)
        stanja.append((x, y, smer))
    return stanja
def opisi_stanje(x, y, smer):
    """Format one state: x right-aligned to 3, ':', y left-aligned to 3,
    then the heading arrow right-aligned to 2."""
    arrows = dict(zip("NESW", "^>v<"))
    return "{:>3}:{:<3}{:>2}".format(x, y, arrows[smer])
def prevedi(ime_vhoda, ime_izhoda):
    """Execute the commands from ime_vhoda and write one formatted state
    per line to ime_izhoda."""
    with open(ime_izhoda, "w") as izhod:
        for stanje in izvedi(ime_vhoda):
            izhod.write(opisi_stanje(*stanje) + "\n")
def opisi_stanje_2(x, y, smer):
    """Format a state as '<arrow>(x:y)', padding the arrow so the '('
    column lines up across different widths of x."""
    s = {"N" : "^", "E" : ">",
         "S" : "v", "W" : "<"}
    # Two-digit or negative x: pad arrow to 3 columns.
    if 99 > x > 9 or x < 0:
        return "{:<3}({:}:{:>})".format(s[smer], x ,y)
    # Three-or-more-digit x: pad arrow to 2 columns.
    if x > 99:
        return "{:<2}({:}:{:>})".format(s[smer], x ,y)
    # Single-digit x: pad arrow to 4 columns.
    else:
        return "{:<4}({:}:{:>})".format(s[smer], x ,y)
import unittest
class TestObvezna(unittest.TestCase):
    """Mandatory-part tests; they require primer.txt and ukazi.txt to
    exist next to this file."""

    def test_branje(self):
        self.assertEqual(
            izvedi("primer.txt"),
            [(0, 0, 'N'), (0, 0, 'E'), (12, 0, 'E'), (12, 0, 'S'), (12, 2, 'S'),
             (12, 2, 'E'), (15, 2, 'E'), (15, 2, 'N'), (15, 2, 'W')]
        )
        self.assertEqual(
            izvedi("ukazi.txt"),
            [(0, 0, 'N'), (0, 0, 'E'), (1, 0, 'E'), (1, 0, 'S'), (1, 0, 'W'),
             (0, 0, 'W'), (0, 0, 'S'), (0, 0, 'E'), (1, 0, 'E'), (1, 0, 'S'),
             (1, 3, 'S'), (1, 3, 'E'), (2, 3, 'E'), (2, 3, 'S'), (2, 3, 'W')]
        )

    def test_opisi_stanje(self):
        self.assertEqual(opisi_stanje(0, 12, "N"), "  0:12  ^")
        self.assertEqual(opisi_stanje(111, 0, "E"), "111:0   >")
        self.assertEqual(opisi_stanje(-2, 111, "S"), " -2:111 v")
        self.assertEqual(opisi_stanje(0, 0, "W"), "  0:0   <")

    def test_prevedi(self):
        from random import randint
        import os
        # Random output name so parallel runs don't collide.
        ime = "izhod{:05}.txt".format(randint(0, 99999))
        try:
            self.assertIsNone(prevedi("primer.txt", ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:0   >
 12:0   >
 12:0   v
 12:2   v
 12:2   >
 15:2   >
 15:2   ^
 15:2   <""")
            self.assertIsNone(prevedi("ukazi.txt", ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:0   >
  1:0   >
  1:0   v
  1:0   <
  0:0   <
  0:0   v
  0:0   >
  1:0   >
  1:0   v
  1:3   v
  1:3   >
  2:3   >
  2:3   v
  2:3   <""")
        finally:
            os.remove(ime)
        # Also run against a freshly generated input file.
        vime = "vhod{:05}.txt".format(randint(0, 99999))
        open(vime, "wt").write("NAPREJ 23\nLEVO\nNAPREJ 17\n")
        try:
            self.assertIsNone(prevedi(vime, ime))
            self.assertEqual(open(ime).read().rstrip(), """  0:0   ^
  0:-23 ^
  0:-23 <
-17:-23 <""")
        finally:
            os.remove(ime)
            os.remove(vime)
class TestDodatna(unittest.TestCase):
    """Optional-part tests for the alternative state formatting."""

    def test_opisi_stanje(self):
        self.assertEqual(opisi_stanje_2(0, 12, "N"), "^   (0:12)")
        self.assertEqual(opisi_stanje_2(111, 0, "E"), "> (111:0)")
        self.assertEqual(opisi_stanje_2(-2, 111, "S"), "v  (-2:111)")
        self.assertEqual(opisi_stanje_2(0, 0, "W"), "<   (0:0)")
if __name__ == "__main__":
    # Run both unittest suites above when executed directly.
    unittest.main()
|
[
"[email protected]"
] | |
7157050d8baaafca99fac5e26a9089f52b26636c
|
b2e9e3db0202a6bd06b5d1f4c4fd3369b5260261
|
/python/tests.py
|
db2473b8cd480c38936c4f0a6c01bddbac30db5c
|
[] |
no_license
|
jackmoody11/project-euler-solutions
|
66e7128cae130499ce518c2008e5df91a6883a68
|
8b6e00bfac7855f5c892f5b3094415935358cb98
|
refs/heads/master
| 2020-04-12T23:52:57.347142 | 2020-01-10T00:23:16 | 2020-01-10T00:23:16 | 162,831,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,600 |
py
|
import importlib
import time
import doctest
import sys
import argparse
# Expected answers live one per line as "<problem>. <answer>".
filename = "../answers.txt"
with open(filename, 'r') as f:
    lines = [line.strip() for line in f.readlines()]
# Map problem number -> expected answer (kept as a string).
ANSWERS = {int(line.split(". ")[0]):
           line.split(". ")[1] for line in lines}
def test(prob, expected):
    """Import solution module p{prob:03d}, time its compute(), and print
    a per-problem line with a FAIL marker on mismatch.

    Returns the elapsed time in milliseconds.
    Raises ImportError when the solution module doesn't exist yet.
    """
    try:
        module = importlib.import_module("p{:03d}".format(prob))
    except ImportError:
        raise ImportError(
            "It looks like you haven't solved #{prob} yet".format(prob=prob))
    start = time.time()
    actual = int(module.compute())  # Must return an int
    elapsed = int((time.time() - start) * 1000)
    print("Problem {:03d}: {:7d} ms".format(
        prob, elapsed),
        "" if actual == expected else " *** FAIL ***")
    return elapsed
def get_args():
    """Parse the command line: an optional list of problem numbers,
    plus --graph (plot timings) and --doctest (run utils doctests)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('numbers', type=int, nargs='*')
    parser.add_argument('-g', '--graph', action='store_true',
                        help='graph execution time for selected problems')
    parser.add_argument('--doctest', action='store_true',
                        help='run doctests on utils')
    return parser.parse_args()
def main(doctests=False):
    """Time the solutions selected on the command line (or all known
    answers when none are given), then optionally plot the timings and
    run the utils doctests.

    NOTE(review): the `doctests` parameter is never used -- the flag is
    read from args.doctest instead; confirm whether it can be dropped.
    """
    args = get_args()
    results = dict()
    if len(args.numbers) > 0:
        for prob in args.numbers:
            try:
                results[prob] = test(prob, ANSWERS[prob])
            except KeyError:
                raise KeyError(
                    "It looks like you haven't added #{prob} to answers.txt yet".format(prob=prob))
        num_probs = len(args.numbers)
        prob_names = ", ".join(map(str, args.numbers))
    else:
        for (prob, expected) in sorted(ANSWERS.items()):
            results[prob] = test(prob, expected)
        num_probs = len(ANSWERS)
        prob_names = "(all)"
    # test() returns milliseconds, hence the /1000 for seconds.
    total_time = sum(results.values())
    print("Total computation time: {} secs".format(total_time / 1000))
    print("{} problems solved".format(num_probs))
    if args.doctest:
        doctesting()
    if args.graph:
        graph(results, prob_names)
def doctesting():
    """Run the doctests in the local utils module (verbose). Returns 0."""
    import utils
    doctest.testmod(utils, verbose=True)
    return 0
def graph(data, probs):
    """Bar-chart per-problem execution times (ms) and show the figure.

    NOTE(review): rc('text', usetex=True) requires a working LaTeX
    install -- confirm that's available wherever this runs.
    """
    import matplotlib.pyplot as plt
    from matplotlib import rc
    rc('text', usetex=True)
    plt.bar(data.keys(), data.values())
    plt.title(
        'Execution Time (secs) for Project Euler Problems {0}'.format(probs))
    plt.xlabel('Project Euler Problem Number')
    plt.ylabel('Execution Time (ms)')
    plt.show()
if __name__ == "__main__":
    # Command-line entry point; options are parsed inside main().
    main()
|
[
"[email protected]"
] | |
714d966a83c3c2a5e4206edcffebd91a7fd93889
|
d0cdcbe3471cc980809fd4ed5182385bb23216b5
|
/backend/task_profile/migrations/0001_initial.py
|
79a56729aed9aebe14923d104aef369745363a4a
|
[] |
no_license
|
crowdbotics-apps/strippee-19912
|
4770d421f16389e3ada580018e52385094cedf00
|
16d1d6258dca231fb157ecb3fa6709f490a5c1fc
|
refs/heads/master
| 2022-12-09T22:53:07.670289 | 2020-09-01T20:24:27 | 2020-09-01T20:24:27 | 292,099,249 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,239 |
py
|
# Generated by Django 2.2.16 on 2020-09-01 20:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the task_profile app: tasker and customer
    profiles (one per user), notifications and invite codes."""

    initial = True

    dependencies = [
        # Works with whichever user model settings.AUTH_USER_MODEL names.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Service-provider profile, 1:1 with a user.
        migrations.CreateModel(
            name='TaskerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('city', models.CharField(blank=True, max_length=50, null=True)),
                ('vehicle', models.CharField(blank=True, max_length=50, null=True)),
                ('closing_message', models.TextField(blank=True, null=True)),
                ('work_area_radius', models.FloatField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Broadcast-style notification: many users per notification.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=20)),
                ('message', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ManyToManyField(related_name='notification_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Referral code owned by a single user.
        migrations.CreateModel(
            name='InviteCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Client-side profile, 1:1 with a user.
        migrations.CreateModel(
            name='CustomerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile_number', models.CharField(max_length=20)),
                ('photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerprofile_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
d53072873c3a5166c4017a13617ff165c8605c46
|
5c89f7c8ad6213b8c194c6e45424db199ef31041
|
/s5/1.5.1_operaciones_aritmeticas_input copy.py
|
d4c5cd82e4a065d1b2e3730e75c9ea76506033c8
|
[] |
no_license
|
camohe90/mision_tic_G6
|
1ccfe5eef6094a1bc0dc6ca5debf2c7a9e959121
|
bc04a468a568b5972b29e40643799cd9848219d8
|
refs/heads/master
| 2023-05-29T14:26:53.946013 | 2021-06-09T14:44:59 | 2021-06-09T14:44:59 | 366,415,146 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
# Read two integers, then print the result and result type of each of
# the six basic arithmetic operations, in the same order as before.
numero1 = int(input("ingrese numero 1: "))
numero2 = int(input("ingrese numero 2: "))

for resultado in (
    numero1 + numero2,    # sum
    numero1 - numero2,    # difference
    numero1 * numero2,    # product
    numero1 / numero2,    # true division (float)
    numero1 // numero2,   # floor division
    numero1 % numero2,    # modulo
):
    print(resultado)
    print(type(resultado))
|
[
"[email protected]"
] | |
9494c14281d52325c997fa672d79c0f65cd32d67
|
92436a50cc26c8c8a216ba6d4a62e36069614234
|
/test/losses_multi_output_sum_loss_test.py
|
1bd83488fc1d642aace065a621b9b116176498fb
|
[
"MIT"
] |
permissive
|
hahaxun/ClassyVision
|
9341f4e6849c858094592052f3df111c13d1a91d
|
b3f714ef94275b3e9753ab3f3c8256cb852b96fc
|
refs/heads/master
| 2021-08-17T07:42:34.402613 | 2021-03-08T08:50:01 | 2021-03-08T08:50:01 | 245,940,574 | 1 | 0 |
MIT
| 2021-03-08T08:50:01 | 2020-03-09T04:02:59 |
Python
|
UTF-8
|
Python
| false | false | 1,176 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.losses import (
ClassyLoss,
MultiOutputSumLoss,
build_loss,
register_loss,
)
@register_loss("mock_1")
class MockLoss1(ClassyLoss):
    """Trivial loss fixture: always evaluates to 1.0 regardless of input."""

    def forward(self, pred, target):
        return torch.tensor(1.0)

    @classmethod
    def from_config(cls, config):
        # The mock takes no configuration.
        return cls()
class TestMultiOutputSumLoss(unittest.TestCase):
    def test_multi_output_sum_loss(self):
        """MultiOutputSumLoss applies the wrapped loss to each output and
        sums: 1.0 for a single tensor, 5.0 for a list of five."""
        config = {"name": "multi_output_sum_loss", "loss": {"name": "mock_1"}}
        crit = build_loss(config)
        self.assertTrue(isinstance(crit, MultiOutputSumLoss))

        # test with a single output
        output = torch.tensor([1.0, 2.3])
        target = torch.tensor(1.0)
        self.assertAlmostEqual(crit(output, target).item(), 1.0)

        # test with a list of outputs
        output = [torch.tensor([1.2, 3.2])] * 5
        target = torch.tensor(2.3)
        self.assertAlmostEqual(crit(output, target).item(), 5.0)
|
[
"[email protected]"
] | |
7ba4b6660a56236839281e9a6a45e1423b62ebcc
|
4de2a833df412e1609b9894c0cb809c4d70f8623
|
/app/news/migrations/0002_auto_20180313_1540.py
|
75c1430513d4b94067c753c0594020aecc4e7186
|
[] |
no_license
|
Isaias301/e-commerce
|
06ea11238403b7fa70c5c8e387253441d0dd07f4
|
e00f94406c432751adfa03556ecfe154ec222fd9
|
refs/heads/master
| 2021-04-09T10:34:23.399915 | 2018-03-16T00:33:43 | 2018-03-16T00:33:43 | 125,439,146 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 810 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-03-13 18:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replaces the Article/Reporter models with a minimal Teste model."""

    dependencies = [
        ('news', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Teste',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        # The FK must be removed before the referenced model is deleted.
        migrations.RemoveField(
            model_name='article',
            name='reporter',
        ),
        migrations.DeleteModel(
            name='Article',
        ),
        migrations.DeleteModel(
            name='Reporter',
        ),
    ]
|
[
"[email protected]"
] | |
7c7d39ff26b769153bb74f24a416d86ba8a0eaa3
|
d0f4327ca33aa37e976b024ddfbd6361fdf1a8ee
|
/LogisticRegression.py
|
1747d6347b97ed0cc2ae5c1f114f1c197b1a7726
|
[] |
no_license
|
MitsuruFujiwara/TensorFlowTraining
|
c5f828356ea93c868f16ea67335dd4f32b1c7040
|
3a391c849699bd8332c23ddfffb773cf3050c69c
|
refs/heads/master
| 2021-01-17T17:48:21.340414 | 2016-07-24T13:05:53 | 2016-07-24T13:05:53 | 60,701,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,291 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import tensorflow as tf
from RegressionBase import RegressionBase
class LogisticRegression(RegressionBase):
    """Logistic regression trained with Adagrad on a sigmoid
    cross-entropy loss (TensorFlow 1.x graph API)."""

    def __init__(self, trX, trY, numStep, numParameter, learning_rate):
        RegressionBase.__init__(self, trX, trY, numStep, numParameter, learning_rate)

    def training(self, loss):
        # Adagrad adapts the step size per parameter.
        return tf.train.AdagradOptimizer(self.learning_rate).minimize(loss)

    def loss(self, output, supervisor_labels_placeholder):
        # NOTE(review): positional (logits, labels) arguments match the
        # old TF 1.x signature; newer TF requires keyword arguments --
        # confirm the pinned TF version.
        x_entropy = tf.nn.sigmoid_cross_entropy_with_logits(output, supervisor_labels_placeholder, name = 'xentropy')
        return tf.reduce_mean(x_entropy, name = 'xentropy_mean')
if __name__ == '__main__':
    # Train on the 27 feature columns of test_data.csv against label Y;
    # missing feature values are treated as 0.
    data = pd.read_csv('test_data.csv')
    trX = data[[\
        'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9','X10', 'X11', 'X12',\
        'X13', 'X14', 'X15', 'X16', 'X17', 'X18', 'X19', 'X20', 'X21','X22', 'X23',\
        'X24', 'X25', 'X26', 'X27'\
        ]].fillna(0)
    trY = data['Y']

    numStep = 10000
    numParameter = len(trX.columns)
    learning_rate = 0.5

    r = LogisticRegression(trX, trY, numStep, numParameter, learning_rate)
    r.run()
    # Reference results from a previous run:
    # loss = 0.282286
    # b = -6.56112
    # W0 = 0.226928
    # W1 = 0.238033
    # W2 = -0.0118023
    # W3 = 0.499244
|
[
"[email protected]"
] | |
4f429af341e1d6a151535732aff18ab72fe74f77
|
443fbf0cdd31bb65db09e2b1f3e9770adc69c08d
|
/Python 1/1 - Introducion/5 - Comentarios/Comentarios.py
|
63b10fefa4166d61ecf646f0d8d2eb7abb60000e
|
[] |
no_license
|
marcvifi10/Curso-Python
|
b9b376c39713e525756eb26f31f914359aef537a
|
047830ca92e027c7d37e2f6bb042971a773a5d6d
|
refs/heads/master
| 2020-07-15T08:30:53.622053 | 2019-09-18T16:23:35 | 2019-09-18T16:23:35 | 205,521,518 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 158 |
py
|
# Single-line comment example.
"""
Example of a comment
spanning several lines (a bare string literal).
"""
# The same name can be rebound to values of different types.
valor = 10
print("Valor1: ", valor)
valor = "Marc"
print("Valor2: ", valor)
|
[
"[email protected]"
] | |
8ba2e1d1a7dd1c30e8a51f573682ad69ca86c3a5
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/systrace/systrace/decorators.py
|
8545eaac83a986ba07ccf8d3d8f5e450b9ede645
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505 | 2021-05-24T08:01:49 | 2021-05-25T06:07:38 | 33,947,548 | 2,032 | 742 |
BSD-3-Clause
| 2022-08-26T16:01:18 | 2015-04-14T17:49:05 |
HTML
|
UTF-8
|
Python
| false | false | 1,238 |
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def HostOnlyTest(func):
    """Mark a unit test to run only on the host machine.

    Android devices are excluded from running it.
    """
    return _SkipTestDecoratorHelper(func, ['android'])
def ClientOnlyTest(func):
    """Mark a unit test to run only on client devices (Android).

    Desktop platforms (win/linux/mac) are excluded from running it.
    """
    return _SkipTestDecoratorHelper(func, ['win', 'linux', 'mac'])
def Disabled(func):
    """Mark a unit test as disabled on every Trybot platform."""
    return _SkipTestDecoratorHelper(func, ['win', 'linux', 'mac', 'android'])
def LinuxMacTest(func):
    """Decorator for running a unit test only on Linux and Mac
    (disabled on Windows and Android)."""
    return _SkipTestDecoratorHelper(func, ['win', 'android'])
def _SkipTestDecoratorHelper(func, disabled_strings):
if not hasattr(func, '_disabled_strings'):
setattr(func, '_disabled_strings', set(disabled_strings))
return func
def ShouldSkip(test, device):
    """Return True when |test| was tagged as disabled on |device|."""
    disabled_devices = getattr(test, '_disabled_strings', None)
    if disabled_devices is None:
        # Untagged tests run everywhere.
        return False
    return device in disabled_devices
|
[
"[email protected]"
] | |
a361554f5dad634f34447cdfe1a41122e44a0a07
|
8f8498bb6f56b19d45a1989c8113a077348c0a02
|
/백준/Gold/말이 되고픈 원숭이.py
|
46295228740c2fdbc91505c286b5dad4b556c089
|
[] |
no_license
|
gjtjdtn201/practice
|
a09b437c892b0b601e156c09cb1f053b52fab11b
|
ea45582b2773616b2b8f350b927559210009d89f
|
refs/heads/master
| 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,358 |
py
|
import sys
# Redirect stdin to a local file so the solution runs outside the judge.
# NOTE(review): remove this line before submitting to the judge.
sys.stdin = open('말이 되고픈 원숭이.txt', 'r')
from collections import deque
import sys  # redundant re-import (already imported above); kept as-is
input = sys.stdin.readline  # faster line reads than builtin input()
def BFS():
    """Breadth-first search over (row, col, knight-jumps-used) states.

    Prints the minimum number of moves to reach the bottom-right cell,
    or -1 when unreachable. Relies on module-level globals: queue,
    visit (distance+1 per state), matrix dims H/W and jump budget K.
    """
    while queue:
        a, b, jump = queue.popleft()
        if a == H-1 and b == W-1:
            # visit stores distance+1, so subtract 1 for the move count.
            print(visit[a][b][jump]-1)
            return
        if jump < K:
            # Budget left: try both plain steps and a knight jump.
            monkey(a, b, jump)
            horse(a, b, jump)
        elif jump == K:
            # Budget exhausted: plain monkey steps only.
            monkey(a, b, jump)
    print(-1)
def monkey(a, b, jump):
    """Enqueue the four orthogonal one-cell moves (dy/dx indices 8-11)
    that stay on the board, hit a free cell, and are unvisited at the
    same jump count."""
    for i in range(8, 12):
        ny = a + dy[i]
        nx = b + dx[i]
        if 0 <= ny < H and 0 <= nx < W and matrix[ny][nx] == 0 and visit[ny][nx][jump] == 0:
            queue.append((ny, nx, jump))
            visit[ny][nx][jump] = visit[a][b][jump] + 1
def horse(a, b, jump):
    """Enqueue the eight knight moves (dy/dx indices 0-7), consuming one
    unit of the jump budget."""
    for i in range(8):
        ny = a + dy[i]
        nx = b + dx[i]
        if 0 <= ny < H and 0 <= nx < W and matrix[ny][nx] == 0 and visit[ny][nx][jump+1] == 0:
            queue.append((ny, nx, jump + 1))
            visit[ny][nx][jump + 1] = visit[a][b][jump] + 1
# Read the jump budget, board size (W columns, H rows) and the grid
# (0 = free, non-zero = blocked), then launch the search from (0, 0).
K = int(input())
W, H = map(int, input().split())
matrix = [list(map(int, input().split())) for _ in range(H)]
# visit[row][col][jumps] holds distance+1; 0 means unvisited.
visit = [[[0]*(K+1) for _ in range(W)] for __ in range(H)]
visit[0][0][0] = 1
# Indices 0-7: knight moves; indices 8-11: orthogonal single steps.
dy = [-2, -1, 1, 2, 2, 1, -1, -2, 0, 0, 1, -1]
dx = [1, 2, 2, 1, -1, -2, -2, -1, 1, -1, 0, 0]
queue = deque()
queue.append((0, 0, 0))
BFS()
|
[
"[email protected]"
] | |
cde6425b065b34539b69cfdaeedad2b56631949b
|
cbd1c52de6cd45208ecce076c238dfc75cebd70a
|
/check_restfulapi_cluster.py
|
7b0a74cda346221e1103d4859b6468d17aefbb0f
|
[
"Apache-2.0"
] |
permissive
|
enterpriseih/distributed-realtime-capfaiss
|
2e20cad0c788c0700df948b6a46be52d91ac5b9b
|
3346f540b6c9d17a6be446fefa8c9b79164929d9
|
refs/heads/main
| 2023-08-16T20:30:20.807161 | 2020-12-11T02:50:41 | 2020-12-11T02:50:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,598 |
py
|
import sys
sys.path.append('core.zip')
import json
import numpy as np
from core.utils.utils import NpEncoder
import requests
import argparse
from core.utils.udecorator import elapsed_time
import time
# Common HTTP headers for every JSON POST in the checks below.
headers = {
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
    'User-Agent': 'Mozilla/5.0',
    'content-type': 'application/json',
    'charset': 'UTF-8',
}
# @elapsed_time
def ReindexApiTest(host, port, rc_id):
    """POST 2000 seeded-random 100-d vectors to rc/reindex; return the
    requests.Response."""
    url = 'http://{host}:{port}/{url}'.format(host=host, port=port, url='rc/reindex')
    d = 100    # vector dimensionality
    nb = 2000  # number of vectors
    np.random.seed(1234)  # fixed seed keeps runs reproducible
    xb = np.random.random((nb, d)).astype('float32')
    # Skew the first component so vectors are distinguishable by index.
    xb[:, 0] += np.arange(nb) / 1000.
    ids = ['u' + str(i) for i in range(nb)]
    # dumps(cls=NpEncoder) then loads() turns ndarrays into JSON-safe lists.
    data = json.dumps({
        'rcId': rc_id,
        'ids': ids,
        'vectors': json.loads(json.dumps(xb, cls=NpEncoder)),
    })
    r = requests.post(url, data=data, headers=headers)
    return r
# @elapsed_time
def SearchApiTest(host, port, rc_id):
    """POST a single query vector (the first of the reindex set) to
    rc/search; return the requests.Response."""
    url = 'http://{host}:{port}/{url}'.format(host=host, port=port, url='rc/search')
    d = 100
    nb = 2000
    # Same seed as ReindexApiTest, so xb[0] matches an indexed vector.
    np.random.seed(1234)
    xb = np.random.random((nb, d)).astype('float32')
    xb[:, 0] += np.arange(nb) / 1000.
    ids = ['u' + str(i) for i in range(1)]
    data = json.dumps({
        'rcId': rc_id,
        'ids': ids,
        # Only the first vector is sent, despite generating nb of them.
        'vectors': json.loads(json.dumps(xb[0:1], cls=NpEncoder)),
    })
    req = requests.post(url, data=data, headers=headers)
    return req
# @elapsed_time
def AddApiTest(host, port, rc_id):
    """POST 1000 additional seeded-random vectors to rc/add; return the
    requests.Response."""
    url = 'http://{host}:{port}/{url}'.format(host=host, port=port, url='rc/add')
    print(url)
    d = 100
    nb = 1000
    np.random.seed(4567)  # different seed than the reindex batch
    xb = np.random.random((nb, d)).astype('float32')
    xb[:, 0] += np.arange(nb) / 2000.
    # Ids are u1001..u2000.
    # NOTE(review): these overlap u1001..u1999 from ReindexApiTest's
    # u0..u1999 range -- confirm whether the overlap is intentional.
    ids = ['u' + str(i + nb) for i in range(1, nb + 1)]
    data = json.dumps({
        'rcId': rc_id,
        'ids': ids,
        'vectors': json.loads(json.dumps(xb, cls=NpEncoder)),
    })
    req = requests.post(url, data=data, headers=headers)
    return req
# @elapsed_time
def DelApiTest(host, port, rc_id):
    """POST ids u0..u9 to rc/del for deletion; return the
    requests.Response."""
    url = 'http://{host}:{port}/{url}'.format(host=host, port=port, url='rc/del')
    ids = ['u' + str(i) for i in range(10)]
    data = json.dumps({
        'rcId': rc_id,
        'ids': ids,
    })
    req = requests.post(url, data=data, headers=headers)
    return req
if __name__ == '__main__':
    print('test begin...')
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, default='localhost', required=False, help='')
    parser.add_argument('--port', type=str, default='8088', required=False, help='')
    parser.add_argument('--rc_id', type=str, default='101001101', required=False, help='')
    params = parser.parse_args()

    # Rebuild the index first, then give the cluster time to settle
    # before adding more vectors.
    req = ReindexApiTest(params.host, params.port, params.rc_id)
    print("""
    reindex
    status_code...%s
    content...%s
    """ % (req.status_code, req.content))
    time.sleep(10)

    req = AddApiTest(params.host, params.port, params.rc_id)
    print("""
    add
    status_code...%s
    content...%s
    """ % (req.status_code, req.content))
    # Search/delete checks are kept disabled, as in the original run.
    # req = SearchApiTest(params.host, params.port, params.rc_id)
    #
    # print("""
    # search
    # content...%s
    #
    # """ % (req.status_code))
    #
    # req = DelApiTest(params.host, params.port, params.rc_id)
    #
    # print("""
    # delete
    # content...%s
    #
    # """ % (req.status_code))
|
[
"[email protected]"
] | |
3925ddc8929dab5fdcfe92af27724f8b26c01938
|
9806c35c4acfa16e56535970dbe5f84271b67664
|
/eod_aps/wsdl/cta_strategy_wsdl/cta_strategy_wsdl_assemble.py
|
71c81c7c89cbf04204b023b497654d74226099a3
|
[] |
no_license
|
dsjmhjs/python_eod
|
a8e3513c77a37cfebf2f21a55bfb19449b8a013b
|
48207dd15e7079ef5fd2cf999367c429087197ea
|
refs/heads/master
| 2020-06-25T23:22:30.874309 | 2019-07-29T10:26:02 | 2019-07-29T10:26:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,476 |
py
|
# -*- coding: utf-8 -*-
import socket
from SimpleXMLRPCServer import SimpleXMLRPCServer
from eod_aps.wsdl.cta_strategy_wsdl.strategy_init_tools import backtest_init
from eod_aps.wsdl.cta_strategy_wsdl.load_strategy_parameter_tools import load_strategy_parameter
from eod_aps.wsdl.cta_strategy_wsdl.strategy_online_offline_tools import strategy_online_offline_job
from eod_aps.model.server_constans import server_constant
def cta_test():
    """Connectivity check: run `ls` on every CTA server and report which
    ones respond (an 'apps' entry in the listing marks a healthy login).

    Returns one status line per server as a single string.
    """
    cta_test_str = ""
    cta_server_list = server_constant.get_cta_servers()
    for cta_server in cta_server_list:
        server_model = server_constant.get_server_model(cta_server)
        result_str = server_model.run_cmd_str('ls')
        if 'apps' in result_str:
            cta_test_str += '%s: connect success!\n' % cta_server
        else:
            cta_test_str += '%s: connect error!\n' % cta_server
    return cta_test_str
def insert_strategy_state_sql():
    """Run the strategy-state SQL insert job for all CTA servers.

    Returns 0 so XML-RPC callers get a serializable success value.
    The import is local, presumably to avoid a circular/startup cost --
    TODO confirm.
    """
    cta_server_list = server_constant.get_cta_servers()
    from eod_aps.job.insert_strategy_state_sql_job import insert_strategy_state_sql_job
    insert_strategy_state_sql_job(cta_server_list)
    return 0
if __name__ == '__main__':
    # Expose the maintenance jobs over XML-RPC on port 8000, bound to
    # this host's primary address (Python 2: SimpleXMLRPCServer).
    s = SimpleXMLRPCServer((socket.gethostbyname(socket.gethostname()), 8000))
    s.register_function(cta_test)
    s.register_function(backtest_init)
    s.register_function(load_strategy_parameter)
    s.register_function(strategy_online_offline_job)
    s.register_function(insert_strategy_state_sql)
    s.serve_forever()
|
[
"123456789"
] |
123456789
|
a0a5a76363e80cfc5a89359595d80aa2fb243154
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/news/models_20201124143954.py
|
a2869c41e721c2526bbf773e0ca6be950d1e2aec
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 923 |
py
|
from django.db import models

from wagtail.contrib.forms.models import AbstractEmailForm
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
# Create your models here.
class NewsPage(AbstractEmailForm):
tempalte ='news/news_page.html'
leanding_page_template = 'news/news_page_leading.html'
subpage_types = []
max_coun = 1
intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
thank_you_text = RichTextField(
blank=True,
features=['bold', 'italic', 'ol', 'ul'])
map_image = models.ForeignKey(
)
map_url = URL Fild
content_panels = AbstractEmailForm.content_panel + [
FieldPanel('intro'),
ImageChooserPanel('map_iamge'),
FieldPanel('map_url'),
InlinePanel('form_fields', label="Form Fields"),
FieldPanel('thank_you_text'),
FieldPanel('from_address'),
FieldPanel('to_address'),
FieldPanel('subject'),
]
|
[
"[email protected]"
] | |
287ffcfa056904f0ad86c399480127764911db51
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/kQayLoFNx4QgWahHu_2.py
|
bb1af1605885be1f995a2ebb66e86374b3d9b1ed
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,528 |
py
|
"""
R, a programming language used for Statistics and Data Analysis, has the
function `order`, which returns a list with the indices needed to sort the
original vector(∗).
For example:
my_list = [1, 3, 3, 9, 8]
# Ordered would be: [0, 1, 2, 4, 3]
In plain words, `order` tells you what elements to look at in your original
vector to sort it. The list `my_list[0] + my_list[1] + my_list[2] + my_list[4]
+ my_list[3]` is equivalent to `sorted(my_list)`.
If two or more elements have the same order, their original order is
preserved. Here, `[0, 1, 2, 4, 3]` and `[0, 2, 1, 4, 3]` would both sort the
vector, but only the first one preserves the original order for the two `3`s.
Implement the function `order()` so that it works the same way it does in R.
### Examples
order([9, 1, 4, 5, 4]) ➞ [1, 2, 4, 3, 0]
order(["z", "c", "f", "b", "c"]) ➞ [3, 1, 4, 2, 0]
order(["order", "my", "words"]) ➞ [1, 0, 2]
### Notes
* Expect numbers and lower-case alphabetic characters only.
* Find Part II: Rank [here](https://edabit.com/challenge/dFosbGy8sFFCEx2Ne).
* Vectors in R are similar to a list. Although vectors in R are 1-indexed, your function should be 0-indexed. Other differences between vectors and lists will be ignored for the scope of this challenge.
* If you implement your own algorithm, it must be **stable** , meaning that the order of identical elements doesn't get switched around.
"""
def order(lst):
return sorted(range(len(lst)), key = lambda i: lst[i])
|
[
"[email protected]"
] | |
fea56c3ed93ad23d5f94bf01932101dfd8229ae3
|
29e526fb77bc4c13082a0f9c0f4104684a01893b
|
/apps/shared/tests/__init__.py
|
e2701bc458fa041f89d343db30d387d028135fda
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Hugochazz/affiliates
|
767034519426a657c0e9b3e38fee94cc3e0042ca
|
e234b0ab925b33d71cb5ded3d51dccbcbb0e59c1
|
refs/heads/master
| 2020-12-25T13:51:53.974167 | 2012-10-30T14:55:50 | 2012-10-30T14:55:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,655 |
py
|
from contextlib import contextmanager, nested
from functools import wraps
from smtplib import SMTPException
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.mail.backends.base import BaseEmailBackend
from django.core.management import call_command
from django.db.models import loading
from django.test.client import RequestFactory
from django.utils.translation import get_language
import test_utils
from funfactory.urlresolvers import (get_url_prefix, Prefixer, reverse,
set_url_prefix)
from mock import patch
from nose.tools import eq_, ok_
from tower import activate
from browserid.tests import mock_browserid
from facebook.tests import FacebookAuthClient
class BrokenSMTPBackend(BaseEmailBackend):
"""Simulates an email backend that throws errors."""
def send_messages(self, email_messages):
raise SMTPException('Dummy')
def model_ids(models):
"""Generates a list of model ids from a list of model objects."""
return [m.pk for m in models]
class SessionRequestFactory(RequestFactory):
"""RequestFactory that adds session data to requests."""
def __init__(self, *args, **kwargs):
super(SessionRequestFactory, self).__init__(*args, **kwargs)
self.session_middleware = SessionMiddleware()
def request(self, *args, **kwargs):
request = super(SessionRequestFactory, self).request(*args, **kwargs)
self.session_middleware.process_request(request)
return request
class TestCase(test_utils.TestCase):
"""Base class for Affiliates test cases."""
client_class = FacebookAuthClient
@contextmanager
def activate(self, locale):
"""Context manager that temporarily activates a locale."""
old_prefix = get_url_prefix()
old_locale = get_language()
rf = test_utils.RequestFactory()
set_url_prefix(Prefixer(rf.get('/%s/' % (locale,))))
activate(locale)
yield
set_url_prefix(old_prefix)
activate(old_locale)
def browserid_login(self, email):
"""Logs the test client in using BrowserID."""
factory = SessionRequestFactory()
with self.activate('en-US'):
request = factory.get(reverse('home'))
with mock_browserid(email):
self.client.login(request=request, assertion='asdf')
def assert_viewname_url(self, url, viewname, locale='en-US'):
"""Compare a viewname's url to a given url."""
with self.activate(locale):
view_url = reverse(viewname)
return ok_(url.endswith(view_url),
'URL Match failed: %s != %s' % (url, view_url))
def assert_redirects(self, response, url, status=302):
"""Assert that the given response redirects to the given url."""
eq_(response.status_code, status)
eq_(response['Location'], url)
class ModelsTestCase(TestCase):
"""
Does some pre-setup and post-teardown work to create tables for any
of your test models.
Simply subclass this and set self.apps to a tuple of *additional*
installed apps. These will be added *after* the ones in
settings.INSTALLED_APPS.
Based on http://stackoverflow.com/questions/502916#1827272
"""
apps = []
def _pre_setup(self):
# Add the models to the db.
self._original_installed_apps = list(settings.INSTALLED_APPS)
for app in self.apps:
settings.INSTALLED_APPS.append(app)
loading.cache.loaded = False
call_command('syncdb', interactive=False, verbosity=0)
# Call the original method that does the fixtures etc.
super(ModelsTestCase, self)._pre_setup()
def _post_teardown(self):
# Call the original method.
super(ModelsTestCase, self)._post_teardown()
# Restore the settings.
settings.INSTALLED_APPS = self._original_installed_apps
loading.cache.loaded = False
def refresh_model(instance):
"""Retrieves the latest version of a model instance from the DB."""
return instance.__class__.objects.get(pk=instance.pk)
def patch_settings(**new_settings):
"""
Syntactic sugar for patching many settings at once.
TODO: Replace with override_settings in Django 1.4.
"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
patches = []
for name, value in new_settings.items():
patches.append(patch.object(settings, name, value))
with nested(*patches):
return f(*args, **kwargs)
return wrapped
return decorator
|
[
"[email protected]"
] | |
9367c9fc788b09d6bf9c8369612096e5c5ffa3fa
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py
|
2ec9e5a041a24e763dee53002932f7d06da6e9d5
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 2,452 |
py
|
# 2016.11.19 19:47:39 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py
import BigWorld
import Math
from Math import Vector3, Matrix
import math
from AvatarInputHandler import mathUtils, AimingSystems
from AvatarInputHandler.AimingSystems import IAimingSystem
from AvatarInputHandler.cameras import _clampPoint2DInBox2D
class StrategicAimingSystem(IAimingSystem):
_LOOK_DIR = Vector3(0, -math.cos(0.001), math.sin(0.001))
height = property(lambda self: self.__height)
heightFromPlane = property(lambda self: self.__heightFromPlane)
def __init__(self, height, yaw):
self._matrix = mathUtils.createRotationMatrix((yaw, 0, 0))
self.__planePosition = Vector3(0, 0, 0)
self.__height = height
self.__heightFromPlane = 0.0
def destroy(self):
pass
def enable(self, targetPos):
self.updateTargetPos(targetPos)
def disable(self):
pass
def getDesiredShotPoint(self, terrainOnlyCheck = False):
return AimingSystems.getDesiredShotPoint(self._matrix.translation, Vector3(0, -1, 0), True, True, terrainOnlyCheck)
def handleMovement(self, dx, dy):
shift = self._matrix.applyVector(Vector3(dx, 0, dy))
self.__planePosition += Vector3(shift.x, 0, shift.z)
self.__updateMatrix()
def updateTargetPos(self, targetPos):
self.__planePosition.x = targetPos.x
self.__planePosition.z = targetPos.z
self.__updateMatrix()
def __updateMatrix(self):
bb = BigWorld.player().arena.arenaType.boundingBox
pos2D = _clampPoint2DInBox2D(bb[0], bb[1], Math.Vector2(self.__planePosition.x, self.__planePosition.z))
self.__planePosition.x = pos2D[0]
self.__planePosition.z = pos2D[1]
collPoint = BigWorld.wg_collideSegment(BigWorld.player().spaceID, self.__planePosition + Math.Vector3(0, 1000.0, 0), self.__planePosition + Math.Vector3(0, -250.0, 0), 3)
self.__heightFromPlane = 0.0 if collPoint is None else collPoint[0][1]
self._matrix.translation = self.__planePosition + Vector3(0, self.__heightFromPlane + self.__height, 0)
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\AvatarInputHandler\AimingSystems\StrategicAimingSystem.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:47:39 Střední Evropa (běžný čas)
|
[
"[email protected]"
] | |
e83f53e3b09d4c31e6cddb4686f5993e3a6dc7b9
|
3899a37d1f500f7935cd04079e0b293bd64fe1cb
|
/docs/conf.py
|
9902ebaf5dadaec57cdc679f1cbc45f4be1e8a5b
|
[
"MIT"
] |
permissive
|
jubaer145/nlpaug
|
06d5fa83d68537f6485ed5afccfe2ece056aae8b
|
b631660f1997fc503258735ec011ffbe164d12af
|
refs/heads/master
| 2023-06-02T03:45:18.094793 | 2021-06-20T21:17:13 | 2021-06-20T21:17:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,645 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# nlpaug documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 7 07:37:05 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
from unittest.mock import MagicMock
sys.path.append(os.path.abspath('..'))
# Mock module to bypass pip install
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
'librosa', 'librosa.display', 'numpy', 'nltk', 'matplotlib', 'matplotlib.pyplot',
'setuptools', 'python-dotenv', 'nltk.corpus', 'torch', 'transformers']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nlpaug'
copyright = '2019, Edward Ma'
author = 'Edward Ma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.4'
# The full version, including alpha/beta/rc tags.
release = '1.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nlpaugdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nlpaug.tex', 'nlpaug Documentation',
'Edward Ma', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nlpaug', 'nlpaug Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nlpaug', 'nlpaug Documentation',
author, 'nlpaug', 'One line description of project.',
'Miscellaneous'),
]
|
[
"[email protected]"
] | |
6150cb6eab8ab3c168a1eead8a17ce4cc4735cb6
|
e9348d1689215220b7820134a82c2afdf8aed107
|
/backend/young_waterfall_29324/urls.py
|
19b2e1e3e6e251b7ff93a5593048f905ce1b2e58
|
[] |
no_license
|
crowdbotics-apps/young-waterfall-29324
|
8bf2accb9197c45f59ac717b2ec4fe289830b3f8
|
ea74f174180c6af5acca25a82397daa7c48eb7c2
|
refs/heads/master
| 2023-06-26T05:12:51.154938 | 2021-08-01T20:50:29 | 2021-08-01T20:50:29 | 391,735,458 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,253 |
py
|
"""young_waterfall_29324 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Young Waterfall"
admin.site.site_title = "Young Waterfall Admin Portal"
admin.site.index_title = "Young Waterfall Admin"
# swagger
api_info = openapi.Info(
title="Young Waterfall API",
default_version="v1",
description="API documentation for Young Waterfall App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"[email protected]"
] | |
98911ce8c4bc073fa0ada3fad0c3d1e3231ad68e
|
13c2f109585a033a1acecdd912a3142802170921
|
/Python_Object_Serialization_Context_Manager.py
|
2566f5ec1aeb15199b4802d5e018e7fa67a537bf
|
[] |
no_license
|
VakinduPhilliam/Hierachy_Serialization
|
88175764e24d03602eca06e8df13223e8ec4dd7e
|
61d534b23bc3e072356cb33fd763b0cbb6320896
|
refs/heads/master
| 2020-05-24T15:59:41.674047 | 2019-11-01T15:02:08 | 2019-11-01T15:02:08 | 187,346,172 | 0 | 0 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 1,953 |
py
|
# Python object serialization
# The 'pickle' module implements binary protocols for serializing and de-serializing
# a Python object structure.
# “Pickling” is the process whereby a Python object hierarchy is converted into a byte stream,
# and “unpickling” is the inverse operation, whereby a byte stream (from a binary file or bytes-like
# object) is converted back into an object hierarchy.
# Pickling (and unpickling) is alternatively known as “serialization”, “marshalling,” or “flattening”;
# however, to avoid confusion, the terms used here are “pickling” and “unpickling”.
# sqlite3 — DB-API 2.0 interface for SQLite databases
# SQLite is a C library that provides a lightweight disk-based database that doesn’t require a separate
# server process and allows accessing the database using a nonstandard variant of the SQL query language.
# Some applications can use SQLite for internal data storage.
# It’s also possible to prototype an application using SQLite and then port the code to a larger database
# such as PostgreSQL or Oracle.
# Using the connection as a context manager
# Connection objects can be used as context managers that automatically commit or rollback transactions.
# In the event of an exception, the transaction is rolled back; otherwise, the transaction is committed:
import sqlite3
con = sqlite3.connect(":memory:")
con.execute("create table person (id integer primary key, firstname varchar unique)")
# Successful, con.commit() is called automatically afterwards
with con:
con.execute("insert into person(firstname) values (?)", ("Joe",))
# con.rollback() is called after the with block finishes with an exception, the
# exception is still raised and must be caught
try:
with con:
con.execute("insert into person(firstname) values (?)", ("Joe",))
except sqlite3.IntegrityError:
print("couldn't add Joe twice")
|
[
"[email protected]"
] | |
471f69a116bb3f8ea26d0e157151b03c8573d7fb
|
4586fcc1afd15f04dbb269899a5b954adcd8d60e
|
/bin/ldgp.py
|
b825bbe162b265a0c48f0c32c7daf4bf04ca4e6c
|
[] |
no_license
|
gautamits/rgbd
|
d0f1435a2b91b2aa0e848688d3c1c12fc1c77931
|
a055a6b718a1e20957f20f19a0c49bbfa63cbd08
|
refs/heads/master
| 2021-01-20T05:59:43.891910 | 2017-11-25T09:16:34 | 2017-11-25T09:16:34 | 87,881,081 | 0 | 0 | null | 2017-04-25T19:22:50 | 2017-04-11T02:51:16 |
Python
|
UTF-8
|
Python
| false | false | 1,636 |
py
|
import cv2
import numpy as np
def dist(x,y):
return np.sqrt(np.sum((x-y)**2)) #this function returns euclidean distance between two one dimensional arrays
#this function returns histogram of image,
def hist(a):
hist, bin_edges = np.histogram(a, bins = range(64))
return hist
#this function returns ldgp of an image
def ldgp(i):
if i.shape >=3:
i=cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)
height,width=i.shape
#zero padding
first=np.pad(i,((0,0),(1,0)),'constant')
second=np.pad(i,((0,1),(1,0)),'constant')
third=np.pad(i,((0,1),(0,0)),'constant')
fourth=np.pad(i,((0,1),(0,1)),'constant')
first=first[:,0:width]
second=second[1:height+1,0:width]
third=third[1:height+1,:]
fourth=fourth[1:height+1,1:width+1]
first=i-first #gradient at 0 degree
second=i-second #gradient at 45 degree
third=i-third #gradient at 90 degree
fourth=i-fourth # gradient at 135 degree
combo1=32*np.array( first >= second, dtype=int) #binary arrays being converted to decimal
combo2=16*np.array( first >= third, dtype=int)
combo3=8*np.array( first >= fourth, dtype=int)
combo4=4*np.array( second >= third, dtype=int)
combo5=2*np.array( second >= fourth, dtype=int)
combo6=np.array( third >= fourth, dtype=int)
ldgp=combo1+combo2+combo3+combo4+combo5+combo6
ldgp=np.array(ldgp,dtype='uint8')
return ldgp #final ldgp returned
|
[
"[email protected]"
] | |
e8a6c6d6bc56b44d9ac2fae0497b557fe4c040d9
|
b87ea98bc166cade5c78d246aeb0e23c59183d56
|
/samples/openapi3/client/petstore/python-nextgen-aiohttp/setup.py
|
d584a44727dd30a7685acc7a8fbbfecd38037804
|
[
"Apache-2.0"
] |
permissive
|
holisticon/openapi-generator
|
88f8e6a3d7bc059c8f56563c87f6d473694d94e5
|
6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272
|
refs/heads/master
| 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 |
Apache-2.0
| 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null |
UTF-8
|
Python
| false | false | 1,473 |
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from setuptools import setup, find_packages # noqa: H301
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
NAME = "petstore-api"
VERSION = "1.0.0"
PYTHON_REQUIRES = ">=3.7"
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
"aiohttp >= 3.0.0",
"pem>=19.3.0",
"pycryptodome>=3.9.0",
"pydantic >= 1.10.5, < 2",
"aenum"
]
setup(
name=NAME,
version=VERSION,
description="OpenAPI Petstore",
author="OpenAPI Generator community",
author_email="[email protected]",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "OpenAPI Petstore"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Apache-2.0",
long_description_content_type='text/markdown',
long_description="""\
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
"""
)
|
[
"[email protected]"
] | |
a9ede54f8c311163f3f1593922779cde560263bc
|
3d0838cc0d3cca599c2dfb6bea3274ff3cabe0ac
|
/discore/models/_channel.py
|
316c280d8e0f64347a51b6e2f534a923fc679e15
|
[
"MIT"
] |
permissive
|
geek-space-hq/discore
|
6a799f411c81b580f0b3e0aac238e5dcf48d899c
|
45f4870426e635353b3621f5089880cbb30c683c
|
refs/heads/develop
| 2022-12-15T20:51:50.904945 | 2020-09-16T16:05:36 | 2020-09-16T16:05:36 | 295,093,463 | 3 | 0 | null | 2020-09-16T09:41:21 | 2020-09-13T06:36:35 |
Python
|
UTF-8
|
Python
| false | false | 5,679 |
py
|
from __future__ import annotations
from datetime import datetime
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, List, Optional, Union
from pydantic import BaseModel
from pydantic.fields import Field
if TYPE_CHECKING:
from ._emoji import Emoji
from ._guild import GuildMember, Role
from ._user import User, UserMentioned
class Channel(BaseModel):
id: str
type: int
guild_id: Optional[str] = None
position: Optional[int] = None
permission_overwrites: Optional["Overwrite"] = None
name: Optional[str] = None
topic: Optional[str] = None
nsfw: bool = Field(default=False)
last_message_id: Optional[str] = None
bitrate: Optional[int] = None
user_limit: Optional[int] = None
rate_limit_per_user: Optional[int] = None
recipients: Optional[List["User"]] = None
icon: Optional[str] = None
parent_id: Optional[str] = None
last_pin_timestamp: Optional[datetime] = None
class ChannelType(IntEnum):
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
GUILD_NEWS = 5
GUILD_STORE = 6
class Message(BaseModel):
id: str
channel_id: str
aurhor: "User"
content: str
timestamp: datetime
tts: bool
mention_everyone: bool
mentions: List["UserMentioned"]
mention_roles: List["Role"]
attachments: List["Attachment"]
embeds: List["Embed"]
pinned: bool
type: "MessageType"
guild_id: Optional[str] = None
member: Optional["GuildMember"] = None
mention_channels: Optional[List["ChannelMention"]] = None
reactions: Optional[List["Reaction"]] = None
nonce: Optional[Union[int, str]] = None
webhook_id: Optional[str] = None
activity: Optional["MessageActivity"] = None
application: Optional["MessageApplication"] = None
message_reference: Optional["MessageReference"] = None
flags: Optional[int] = None
class MessageType(IntEnum):
DEFAULT = 0
RECIPIENT_ADD = 1
RECIPIENT_REMOVE = 2
CALL = 3
CHANNEL_NAME_CHANGE = 4
CHANNEL_ICON_CHANGE = 5
CHANNEL_PINNED_MESSAGE = 6
GUILD_MEMBER_JOIN = 7
USER_PREMIUM_GUILD_SUBSCRIPTION = 8
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_1 = 9
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_2 = 10
USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_3 = 11
CHANNEL_FOLLOW_ADD = 12
GUILD_DISCOVERY_DISQUALIFIED = 14
GUILD_DISCOVERY_REQUALIFIED = 15
class MessageActivity(BaseModel):
type: int
party_id: Optional[str] = None
class MessageApplication(BaseModel):
id: str
description: str
name: str
cover_image: Optional[str] = None
icon: Optional[str] = None
class MessageReference(BaseModel):
channel_id: str
message_id: Optional[str] = None
guild_id: Optional[str] = None
class MessageActivityType(IntEnum):
JOIN = 1
SPECTATE = 2
LISTEN = 3
JOIN_REQUEST = 5
class MessageFlag(IntEnum):
CROSSPOSTED = 1 << 0
IS_CROSSPOST = 1 << 1
SUPPRESS_EMBEDS = 1 << 2
SOURCE_MESSAGE_DELETED = 1 << 3
URGENT = 1 << 4
class FollowedChannel(BaseModel):
channel_id: str
webhook_id: str
class Reaction:
count: int
me: bool
emoji: "Emoji"
class OverwriteReceiving(BaseModel):
id: str
type: str
allow: int
allow_new: str
deny: int
deny_new: str
class OverwriteSending(BaseModel):
id: str
type: str
allow: Union[int, str]
deny: Union[int, str]
Overwrite = Union[OverwriteReceiving, OverwriteSending]
class Embed(BaseModel):
title: Optional[str] = None
type: Optional["EmbedType"] = None
description: Optional[str] = None
url: Optional[str] = None
timestamp: Optional[datetime] = None
color: Optional[int] = None
footer: Optional["EmbedFooter"] = None
image: Optional["EmbedImage"] = None
thumbnail: Optional["EmbedThumbnail"] = None
video: Optional["EmbedVideo"] = None
provider: Optional["EmbedProvider"] = None
author: Optional["EmbedAuthor"] = None
fields_: Optional[List["EmbedField"]] = Field(default=None, alias="fields")
class EmbedType(str, Enum):
rich = "rich"
image = "image"
video = "video"
gifv = "gifv"
article = "article"
link = "link"
class EmbedThumbnail(BaseModel):
url: Optional[str] = None
proxy_url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedVideo(BaseModel):
url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedImage(BaseModel):
url: Optional[str] = None
proxy_url: Optional[str] = None
height: Optional[int] = None
width: Optional[int] = None
class EmbedProvider(BaseModel):
name: Optional[str] = None
url: Optional[str] = None
class EmbedAuthor(BaseModel):
name: Optional[str] = None
url: Optional[str] = None
icon_url: Optional[str] = None
proxy_icon_url: Optional[str] = None
class EmbedFooter(BaseModel):
text: str
icon_url: Optional[str] = None
proxy_icon_url: Optional[str] = None
class EmbedField(BaseModel):
name: str
value: str
inline: Optional[bool] = None
class Attachment(BaseModel):
id: str
filename: str
size: int
url: str
proxy_url: str
height: Optional[int] = None
width: Optional[int] = None
class ChannelMention(BaseModel):
id: str
guild_id: str
type: "ChannelType"
name: str
class AllowedMentionType(str, Enum):
ROLE_MENTIONS = "roles"
USER_MENTIONS = "users"
EVERYONE_MENTINS = "everyone"
class AllowedMention(BaseModel):
parse: "AllowedMentionType"
roles: List[str]
users: List[str]
|
[
"[email protected]"
] | |
a07cab13bbac62cbe9da389c04efe73253dd55ba
|
c6b1919498776cfc408076246390e2bba56f4c4e
|
/devops_tool/settings.py
|
e422d3a5d42866f11728863fdae9c727b4dd35e6
|
[] |
no_license
|
huozhihui/devops_tool
|
f2ceaf7f1828853e43859645f5ab36a00b0fa7df
|
0eb7b4a14203e30bb2c262075864cec0db21829f
|
refs/heads/master
| 2020-05-20T19:02:47.855055 | 2017-04-18T05:25:59 | 2017-04-18T05:25:59 | 84,509,976 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,196 |
py
|
"""
Django settings for devops_tool project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key is committed to source control — rotate it and
# load from an environment variable before any production deployment.
SECRET_KEY = 'xv*oxmw8)_0jw=e!f6bi1bop1#cpi4_2=jy2da04gf*1!h2he*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): wildcard host header acceptance — tighten for production.
ALLOWED_HOSTS = ['*']
LOGIN_REDIRECT_URL = '/role_manage'
# custom
# ===================================
# Scratch directory for generated/uploaded files, relative to the project.
TMP = os.path.join(BASE_DIR, 'tmp')
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)
UPLOAD_FILE = (
    os.path.join(TMP, 'upload_file')
)
# ANSIBLE = "/etc/ansible"
# ANSIBLE_ROLES = os.path.join(ANSIBLE, 'roles')
# ANSIBLE_YAMLS = os.path.join(ANSIBLE)
# NOTE(review): developer-machine absolute path — breaks on any other host;
# consider reading from the environment like the commented /etc/ansible setup.
ANSIBLE = "/Users/huozhihui/huo/paas_deploy"
ANSIBLE_ROLES = os.path.join(ANSIBLE, 'roles')
ANSIBLE_YAMLS = ANSIBLE
ANSIBLE_HOSTS = ANSIBLE
# NOTE(review): hard-coded SSH credentials in source control — move to a
# secrets store / environment variables.
ANSIBLE_INIT_USER = 'ubunt'
ANSIBLE_INIT_PASS = 'huo244'
# ===================================
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'ext_command',
'client',
'workflow',
'client.templatetags.ext_template',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'devops_tool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
# 'DIRS': [os.path.join(os.path.dirname(__file__), 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'devops_tool.wsgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "devops_tool.routing.channel_routing",
},
}
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh_cn'
TIME_ZONE = 'Asia/Shanghai'
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'console.log'),
},
# 'console': {
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# # 'formatter': 'simple'
# },
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'INFO',
'propagate': False,
},
},
}
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'console': {
# 'class': 'logging.StreamHandler',
# # 'level': 'INFO',
# # 'filename': os.path.join(BASE_DIR, 'console.log'),
# # 'maxBytes': 1024 * 1024 * 15, # 15MB
# # 'backupCount': 10,
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# },
# },
# }
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'handlers': {
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
# },
# 'applogfile': {
# 'level':'INFO',
# 'class':'logging.handlers.RotatingFileHandler',
# 'filename': os.path.join(BASE_DIR, 'APPNAME.log'),
# 'maxBytes': 1024*1024*15, # 15MB
# 'backupCount': 10,
# },
# },
# 'loggers': {
# 'django.request': {
# 'handlers': ['applogfile'],
# 'level': 'INFO',
# 'propagate': True,
# },
# }
# }
|
[
"[email protected]"
] | |
6af287c25e7567cc97a37b41e9b9df7d8d589d3a
|
69427716f39ddb8541b7dca39d26015a26e04104
|
/学习脚本/Python基础学习脚本/select_socket_server.py
|
619aaeec0a826ffbd3a7f9299ce892eb9ef5e5a3
|
[] |
no_license
|
xiatian0918/auto_scripts
|
a0fa80f3ec8a5e49e1b049ebed39a8ae3e7cdf7a
|
413c614260340557cf9e615b1339eae68a8f9acf
|
refs/heads/master
| 2020-05-14T13:32:46.556775 | 2020-01-21T00:18:56 | 2020-01-21T00:18:56 | 181,812,978 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 606 |
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author: xiatian
# Minimal select()-based non-blocking TCP server demo.
import select,socket,sys,queue

server = socket.socket()
server.bind(('localhost',9000))
server.listen(1000)
server.setblocking(False)  # non-blocking: accept()/recv() never stall the loop
inputs = [server,]  # sockets we want select() to watch for readability
outputs = []

# Block until at least one watched socket is readable / writable / errored.
readable , writeable, exceptional = select.select(inputs, outputs, inputs)
print(readable,writeable,exceptional)
for i in readable:
    # BUG FIX: was `if r is server` — `r` was never defined (NameError).
    if i is server:  # the listening socket is readable => a new connection
        conn,addr = server.accept()
        print("来了个新链接",addr)
        inputs.append(conn)
    else:
        # BUG FIX: read from the socket that select() reported ready (`i`),
        # not from a stale `conn` binding left over from a previous accept.
        data = i.recv(1024)
        print("收到数据",data)
|
[
"[email protected]"
] | |
7f9ea1866114fe062661f28006ec80d13194dd03
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/problems/N875_Koko_Eating_Bananas.py
|
2d632a0fdaa6c0078dec7406cb6fa8e0e852a916
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 878 |
py
|
class Solution(object):
    def minEatingSpeed(self, piles, H):
        """Return the minimum integer bananas-per-hour speed that clears all
        piles within H hours (binary search on the answer).

        :type piles: List[int]
        :type H: int
        :rtype: int
        """
        if len(piles) == H:
            # Exactly one hour per pile: the largest pile dictates the speed.
            return max(piles)
        total = sum(piles)
        if total <= H:
            # Even the slowest possible speed (1/hour) finishes in time.
            return 1
        # Feasible speeds form a contiguous range ending at max(piles);
        # total // H is a lower bound (can't beat the average).
        lo, hi = total // H, max(piles)
        while lo < hi:
            candidate = lo + (hi - lo) // 2
            if self.helper(candidate, piles, H):
                hi = candidate  # candidate works: try slower
            else:
                lo = candidate + 1  # too slow: must speed up
        return lo

    def helper(self, value, piles, H):
        """Return True when eating `value` bananas/hour finishes within H hours."""
        # ceil(pile / value) hours per pile, via negative floor division.
        hours_needed = sum(-(-pile // value) for pile in piles)
        return hours_needed <= H
|
[
"[email protected]"
] | |
1d78b2f287093aaabba4344add7cc6fae44f8d34
|
d5aa24b75c2344358752b0af0a47293533820578
|
/data_analysis/IO/load_data.py
|
6333a0cb4611a6478b56ee4e3cef726e09a8e012
|
[] |
no_license
|
ModelDBRepository/234992
|
913da9efaadb704171da907ebd953fe59efe5fb1
|
b969a4c623b92c1bd79138f4132885bc424b114c
|
refs/heads/master
| 2020-05-29T18:28:48.883803 | 2019-05-31T03:42:59 | 2019-05-31T03:42:59 | 189,300,851 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,298 |
py
|
import numpy as np
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).resolve().parents[2]))
import data_analysis.IO.axon_to_python as axon
import data_analysis.IO.binary_to_python as binary
def load_file(filename, zoom=None):
    """Load a recording, dispatching on the file extension.

    Parameters
    ----------
    filename : str
        Path to a '.bin' or '.abf' file.
    zoom : sequence of two floats, optional
        Time window forwarded to the loader; defaults to [0, np.inf]
        (the whole recording).

    Returns
    -------
    Whatever the matching loader returns, or None for unsupported extensions.
    """
    # Fix the shared mutable-default-argument pitfall: build the default
    # window per call instead of once at function definition time.
    if zoom is None:
        zoom = [0, np.inf]
    if filename.endswith('.bin'):
        return binary.load_file(filename, zoom=zoom)
    elif filename.endswith('.abf'):
        print(filename)  # trace which axon file is being read
        return axon.load_file(filename, zoom=zoom)
    else:
        return None
def get_metadata(filename, infos=None):
    """Return recording metadata, dispatching on the file extension.

    '.npz' files are treated as modeling output with a stub protocol dict;
    unsupported extensions yield None.

    NOTE(review): assumes the loaders treat `infos` as per-call scratch —
    previously the default `{}` was shared across calls; confirm nothing
    relied on that accidental persistence.
    """
    # Fix the shared mutable-default-argument pitfall.
    if infos is None:
        infos = {}
    print('filename is', filename)
    if filename.endswith('.bin'):
        return binary.get_metadata(filename, infos=infos)
    elif filename.endswith('.abf'):
        return axon.get_metadata(filename, infos=infos)
    elif filename.endswith('.npz'):
        return {'main_protocol':'modeling_work'}
    else:
        return None
def get_formated_data(filename):
    """Bundle time base, trace and metadata into one dict.

    NOTE(review): assumes VEC[0] is the membrane potential ('Vm') and that
    sampling is uniform (dt taken from the first interval) — confirm against
    the loaders.
    """
    t, VEC = load_file(filename)
    meta = get_metadata(filename)
    data = {'t':t, 'Vm':VEC[0],
            'infos':meta, 'dt':t[1]-t[0]}
    return data
if __name__ == '__main__':
    # Quick visual check: plot the trace of the file given on the command line.
    import sys
    import matplotlib.pylab as plt
    filename = sys.argv[-1]
    print(get_metadata(filename))
    # Load the last 5 seconds of the recording (negative start, open end).
    t, data = load_file(filename, zoom=[-5.,np.inf])
    # Skip the first 10000 samples of the loaded window before plotting.
    # NOTE(review): assumes data[0] is the voltage trace — confirm.
    plt.plot(t[10000:], data[0][10000:])
    plt.show()
|
[
"[email protected]"
] | |
bda783c687d550284ea64c93dd66f035fb1f1dfb
|
fdfd9cab4e26491da5d2a06a15960362ccf01460
|
/ex32.py
|
2a85257657bbb1b65928785ed5e54f5bf092b766
|
[] |
no_license
|
WilliamsHerrmann/MWM15
|
c182f7f8eca4f30a41e602a8e907497bc927af81
|
3f17abd57473f328ddd1e1a2a7591423f32da0f8
|
refs/heads/master
| 2021-07-07T05:01:06.486909 | 2017-10-02T18:18:44 | 2017-10-02T18:18:44 | 103,341,626 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 816 |
py
|
# NOTE(review): Python 2 syntax (print statements) — will not run under
# Python 3 without converting `print x` to `print(x)`.
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list
for number in the_count:
    print "This is count %d" % number
# same as above
for fruit in fruits:
    print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
    print "I got %r" % i
# we can also build lists, first start with an empty list
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
    print "Adding %d to the list." % i
    # append is a function that lists understand
    elements. append(i)
# now we can print them out too
for i in elements:
    print "Element was: %d" % i
|
[
"[email protected]"
] | |
c05b2d2d9ecd3eba54b5f2efb976613d93068b2e
|
5389214afd2a1607925c2104227395a4f2a2800e
|
/ajax_guide/urls.py
|
453bb0f440546b9de8d098f5eca2b16974c1770b
|
[] |
no_license
|
vinoyjoshi/bandit
|
272081b3c843e85969e1a2217080beb08c2b0df5
|
2421d742bbf31faf9b699bd20058c242cbe68773
|
refs/heads/main
| 2023-01-06T01:49:58.327732 | 2020-10-15T19:47:39 | 2020-10-15T19:47:39 | 304,411,565 | 1 | 0 | null | 2020-10-15T19:47:40 | 2020-10-15T18:13:48 |
Python
|
UTF-8
|
Python
| false | false | 1,013 |
py
|
"""ajax_guide URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app1 import views as app1
from django.conf.urls import url
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', app1.contactPage),
    url(r'^ajax/contact-submit/$', app1.contact_submit, name='contact_submit'),
    # BUG FIX: this route used path() with a regex string; path() matches the
    # pattern literally (no regex interpretation), so a URL containing '^'
    # and '$' could never be requested. Use url() like the route above so the
    # regex is honored.
    url(r'^ajax/get_contact_info/$', app1.get_contact_info, name='get_contact_info'),
]
|
[
"[email protected]"
] | |
7ed93578216aac980f00d00bb895797a9107acd9
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/018_dictionaries/examples/Python 3 Most Nessesary/9.3.Listing 9.4. Enumerating dictionary elements.py
|
68e2d165ac09ae3d6584391151010bbb29be77b9
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,111 |
py
|
d = {"x": 1, "y": 2, "z": 3}
for key in d.keys(): # Using the keys() method
    print("({0} => {1})".format(key, d[key]), end=" ")
# Prints: (y => 2) (x => 1) (z => 3)
print() # Emit a newline
for key in d: # Dictionaries also support direct iteration
    print("({0} => {1})".format(key, d[key]), end=" ")
# Prints: (y => 2) (x => 1) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
k = list(d.keys()) # Get the list of keys
k.sort() # Sort the list of keys
for key in k:
    print("({0} => {1})".format(key, d[key]), end=" ")
# Prints: (x => 1) (y => 2) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
# sorted() avoids the explicit list()/sort() pair above
for key in sorted(d.keys()):
    print("({0} => {1})".format(key, d[key]), end=" ")
# Prints: (x => 1) (y => 2) (z => 3)
d = {"x": 1, "y": 2, "z": 3}
# sorted(d) iterates the keys directly
for key in sorted(d):
    print("({0} => {1})".format(key, d[key]), end=" ")
# Prints: (x => 1) (y => 2) (z => 3)
|
[
"[email protected]"
] | |
def2fc41b751673fb8775b648f289d98ef9a0106
|
51f6443116ef09aa91cca0ac91387c1ce9cb445a
|
/Curso_Python_3_UDEMY/desafios/desafio_html.py
|
d5648ffde600f190c5bda1912b3dff47252566db
|
[
"MIT"
] |
permissive
|
DanilooSilva/Cursos_de_Python
|
f449f75bc586f7cb5a7e43000583a83fff942e53
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
refs/heads/main
| 2023-07-30T02:11:27.002831 | 2021-10-01T21:52:15 | 2021-10-01T21:52:15 | 331,683,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
def tag(tag, *args, **kwargs):
    """Render an HTML element.

    Positional args are concatenated as the inner content; keyword args
    become attributes. 'html_class' is translated to the reserved-word
    attribute 'class'.
    """
    if 'html_class' in kwargs:
        # pop+assign moves the key to the end of the kwargs insertion order
        kwargs['class'] = kwargs.pop('html_class')
    attr_parts = []
    for attr_name, attr_value in kwargs.items():
        attr_parts.append(f'{attr_name}="{attr_value}" ')
    attrs = ''.join(attr_parts)
    inner = ''.join(args)
    return f'<{tag} {attrs}>{inner}</{tag}>'
if __name__ == '__main__':
    # Demo: nested tag() calls build a paragraph with styled inline spans.
    print(tag('p',
        tag('span', 'Curso de Python 3, por'),
        tag('strong', 'Juracy Filho', id='jf'),
        tag('span', ' e '),
        tag('strong', 'Leonador Leitão', id='ll'),
        tag('span', '.'),
        html_class='alert'))
|
[
"[email protected]"
] | |
e7b420a62db0bce2fe381107cc685f1bf88035d8
|
f3742f46560486c07c339244f8cf47bb07709561
|
/features/steps/test_utils.py
|
7cc4d34cc1b40e7598dc65345299f0ee9046838a
|
[
"MIT"
] |
permissive
|
Azure/azure-event-hubs-python
|
55b65920f9d8dbe6cc418d63291ba507ce648d97
|
326f772f5cbe3d3eaf68b24485554aada463430a
|
refs/heads/master
| 2023-03-17T22:03:54.241386 | 2020-04-07T22:33:17 | 2020-04-07T22:33:17 | 91,842,040 | 65 | 66 |
MIT
| 2020-04-07T22:33:18 | 2017-05-19T20:14:44 |
Python
|
UTF-8
|
Python
| false | false | 3,540 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
import json
import os
import time
import uuid
def create_mgmt_client(credentials, subscription, location='westus'):
    """Create a throwaway resource group plus an Event Hub namespace.

    Returns (resource_group, eh_client). Namespace creation is a
    long-running operation, so the poller is awaited before returning.
    """
    from azure.mgmt.resource import ResourceManagementClient
    from azure.mgmt.eventhub import EventHubManagementClient
    resource_client = ResourceManagementClient(credentials, subscription)
    rg_name = 'pytest-{}'.format(uuid.uuid4())
    resource_group = resource_client.resource_groups.create_or_update(
        rg_name, {'location': location})
    eh_client = EventHubManagementClient(credentials, subscription)
    namespace = 'pytest-{}'.format(uuid.uuid4())
    creator = eh_client.namespaces.create_or_update(
        resource_group.name,
        namespace)
    # BUG FIX: was `create.wait()` — `create` was never defined (NameError);
    # the LRO poller returned above is named `creator`.
    creator.wait()
    return resource_group, eh_client
def get_eventhub_config():
    """Build the standard Event Hub test settings from environment variables."""
    return {
        'hostname': os.environ['EVENT_HUB_HOSTNAME'],
        'event_hub': os.environ['EVENT_HUB_NAME'],
        'key_name': os.environ['EVENT_HUB_SAS_POLICY'],
        'access_key': os.environ['EVENT_HUB_SAS_KEY'],
        'consumer_group': "$Default",
        'partition': "0",
    }
def get_eventhub_100TU_config():
    """Build the 100-throughput-unit Event Hub test settings from the environment."""
    return {
        'hostname': os.environ['EVENT_HUB_100TU_HOSTNAME'],
        'event_hub': os.environ['EVENT_HUB_100TU_NAME'],
        'key_name': os.environ['EVENT_HUB_100TU_SAS_POLICY'],
        'access_key': os.environ['EVENT_HUB_100TU_SAS_KEY'],
        'consumer_group': "$Default",
        'partition': "0",
    }
def send_constant_messages(sender, timeout, payload=1024):
    """Send fixed-size events for `timeout` seconds; return how many were sent.

    NOTE(review): EventData is never imported in this module — presumably
    `from azure.eventhub import EventData`; confirm.
    """
    # BUG FIX: the deadline never added the timeout, so the while condition
    # was immediately false and the function always returned 0.
    deadline = time.time() + timeout
    total = 0
    while time.time() < deadline:
        data = EventData(body=b"D" * payload)
        sender.send(data)
        total += 1
    return total
def send_constant_async_messages(sender, timeout, batch_size=10000, payload=1024):
    """Transfer fixed-size events for `timeout` seconds, flushing every 10000
    messages; return the number transferred.

    NOTE(review): `batch_size` is accepted but unused, and this function is
    shadowed by a later definition of the same name — confirm which is
    intended and rename one of them.
    """
    # BUG FIX: deadline omitted `+ timeout`, so the loop body never ran.
    deadline = time.time() + timeout
    total = 0
    while time.time() < deadline:
        # BUG FIX: was `args.payload` — `args` is undefined in this scope;
        # use the `payload` parameter.
        data = EventData(body=b"D" * payload)
        sender.transfer(data)
        total += 1
        if total % 10000 == 0:
            sender.wait()
    return total
# NOTE(review): redefinition — this silently shadows the function of the same
# name directly above. It also looks unfinished: `deadline` omits `+ timeout`
# (the loop never runs), the built EventData is never handed to `sender`, and
# `data_generator` is undefined. Kept as-is pending clarification.
def send_constant_async_messages(sender, timeout, batch_size=1, payload=1024):
    deadline = time.time()
    while time.time() < deadline:
        if batch_size > 1:
            data = EventData(batch=data_generator())
        else:
            data = EventData(body=b"D" * payload)
async def receive_pump(receiver, timeout, validation=True):
    """Drain `receiver` for `timeout` seconds, optionally asserting that
    sequence numbers are strictly consecutive and bodies are valid JSON.

    NOTE(review): `json` is used below but not imported at module level;
    `offset` is assigned and never read; `total` is accumulated but never
    returned — confirm whether callers expect a count.
    """
    total = 0
    deadline = time.time() + timeout
    sequence = 0
    offset = None
    while time.time() < deadline:
        batch = await receiver.receive(timeout=5)
        total += len(batch)
        if validation:
            assert receiver.offset
            for event in batch:
                next_sequence = event.sequence_number
                assert next_sequence > sequence, "Received Event with lower sequence number than previous."
                assert (next_sequence - sequence) == 1, "Sequence number skipped by a value great than 1."
                sequence = next_sequence
                # Event bodies may arrive in chunks; join before decoding.
                msg_data = b"".join([b for b in event.body]).decode('UTF-8')
                assert json.loads(msg_data), "Unable to deserialize Event data."
|
[
"[email protected]"
] | |
9dcf9692cc7ae29a7f56fa27e78cdc21365c70ba
|
8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac
|
/PySide/QtGui/QStyleOptionTab.py
|
264b7c0e040a27794ebb0a067eb56b9436f49f11
|
[
"Apache-2.0"
] |
permissive
|
sonictk/python-skeletons
|
be09526bf490856bb644fed6bf4e801194089f0d
|
49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d
|
refs/heads/master
| 2020-04-06T04:38:01.918589 | 2016-06-09T20:37:43 | 2016-06-09T20:37:43 | 56,334,503 | 0 | 0 | null | 2016-04-15T16:30:42 | 2016-04-15T16:30:42 | null |
UTF-8
|
Python
| false | false | 1,122 |
py
|
# encoding: utf-8
# module PySide.QtGui
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtGui.so
# by generator 1.138
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
from QStyleOption import QStyleOption
class QStyleOptionTab(QStyleOption):
    """Auto-generated PySide stub for QtGui.QStyleOptionTab.

    Every attribute below is a placeholder set to None by the stub
    generator; the real values live in the compiled QtGui binding. Do not
    edit by hand.
    """
    # no doc
    def __init__(self, *more): # real signature unknown; restored from __doc__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass
    Beginning = None
    CornerWidget = None
    cornerWidgets = None
    CornerWidgets = None
    End = None
    icon = None
    LeftCornerWidget = None
    Middle = None
    NextIsSelected = None
    NoCornerWidgets = None
    NotAdjacent = None
    OnlyOneTab = None
    position = None
    PreviousIsSelected = None
    RightCornerWidget = None
    row = None
    selectedPosition = None
    SelectedPosition = None
    shape = None
    StyleOptionType = None
    StyleOptionVersion = None
    TabPosition = None
    text = None
    Type = None
    Version = None
    __new__ = None
|
[
"[email protected]"
] | |
857ea37b6d65aa8708b7aa91520822899db9bbaa
|
b3e90c765a70d0c68a3c703c31ebfbcc67d8f83e
|
/AN_Bridging/box_ws/src/multi_box/src/Algs/SoftActorCritic.py
|
6147e9cd1403a16b426ef7d1722e30eefb48531e
|
[] |
no_license
|
ronf-ucb/MultiRobot
|
619fe1750dd25c336f7ef793e43983d992cbf519
|
b509e9c43e330e737135298ea4cfbd4190222328
|
refs/heads/master
| 2021-07-19T20:39:47.835985 | 2020-09-07T14:32:39 | 2020-09-07T14:32:39 | 211,394,959 | 1 | 2 | null | 2019-10-30T22:21:20 | 2019-09-27T20:15:07 |
C++
|
UTF-8
|
Python
| false | false | 3,825 |
py
|
#! /usr/bin/env python
import numpy as np
import torch
import torch.nn as nn
import math
import rospy
from std_msgs.msg import String, Int8
from geometry_msgs.msg import Vector3
import vrep
import matplotlib.pyplot as plt
import torch.optim as optim
from Networks.network import Network
from Networks.softNetwork import SoftNetwork
from agent import Agent
from Buffers.CounterFactualBuffer import Memory
cuda_avail = torch.cuda.is_available()
device = torch.device("cuda" if cuda_avail else "cpu")
class SAC(Agent):
    """Soft Actor-Critic agent: value net + Q net + target value net + squashed
    Gaussian policy, trained off-policy from a replay buffer.

    NOTE(review): several names used below are never defined in this file:
    `self.vPars`/`self.vTrain` (only actPars/actTrain/qPars/qTrain are read
    from `params`) and `ReplayBuffer` (only `Memory` is imported) — confirm
    against the rest of the project before running with trainMode enabled.
    """
    def __init__(self, params, name, task):
        super(SAC, self).__init__(params, name, task)
        self.aPars = params['actPars']
        self.aTrain = params['actTrain']
        self.qPars = params['qPars']
        self.qTrain = params['qTrain']
        if self.trainMode:
            self.QNet = Network(self.qPars, self.qTrain).to(device)
            self.VNet = Network(self.vPars, self.vTrain).to(device)  # NOTE(review): vPars/vTrain never assigned above
            self.VTar = Network(self.vPars, self.vTrain).to(device)
            self.policyNet = SoftNetwork(self.aPars, self.aTrain).to(device)
        else:
            print('Not implemented')
        # Hard copy of V parameters into the target network before training.
        for target_param, param in zip(self.VTar.parameters(), self.VNet.parameters()):
            target_param.data.copy_(param)
        self.expSize = self.vTrain['buffer']
        self.actions = self.aPars['neurons'][-1]
        self.state = self.aPars['neurons'][0]
        self.exp = ReplayBuffer(self.expSize, self.actions, np.float32, self.state, np.float32)
        task.initAgent(self)
        # Busy-wait until the task flips self.stop; training happens in the
        # task's callbacks while this keeps the process alive.
        while(not self.stop):
            x = 1+1
        task.postTraining()
    def load_nets(self):
        # Not implemented for this agent.
        pass
    def saveModel(self):
        # Not implemented for this agent.
        pass
    def get_action(self, s):
        # Sample an action from the squashed policy for state `s`; returns a
        # flat numpy array.
        action, _ , _, _, _= self.policyNet(torch.FloatTensor(s))
        action = np.ravel(action.detach().numpy())
        return action
    def send_to_device(self, s, a, r, next_s, d):
        # Convert a sampled batch to float tensors on the training device;
        # rewards and done flags gain a trailing singleton dim for broadcasting.
        s = torch.FloatTensor(s).to(device)
        a = torch.FloatTensor(a).to(device)
        r = torch.FloatTensor(r).unsqueeze(1).to(device)
        next_s = torch.FloatTensor(next_s).to(device)
        d = torch.FloatTensor(np.float32(d)).unsqueeze(1).to(device)
        return s, a, r, next_s, d
    def train(self):
        # One SAC update once the buffer holds enough transitions.
        if len(self.exp) > 750:
            s, a, r, next_s, d = self.exp.sample_batch(self.batch_size)
            s, a, r, next_s, d = self.send_to_device(s, a, r, next_s, d)
            q = self.QNet(torch.cat([s, a], dim = 1))
            v = self.VNet(s)
            new_a, log_prob, z, mean, log_std = self.policyNet(s)
            target_v = self.VTar(next_s)
            # TD target; (1 - d) zeroes the bootstrap on terminal transitions.
            next_q = r + (1 - d) * self.discount * target_v
            q_loss = self.QNet.get_loss(q, next_q.detach())
            new_q = self.QNet(torch.cat([s, new_a], dim=1))
            # Soft value target includes the entropy term (-alpha * log_prob).
            next_v = new_q - log_prob * self.alpha
            v_loss = self.VNet.get_loss(v, next_v.detach())
            target = new_q - v
            actor_loss = (log_prob * (log_prob*self.alpha - target).detach()).mean()
            # Small L2 penalties keep the policy's mean/log-std from drifting.
            mean_loss = 1e-3 * mean.pow(2).mean()
            std_loss = 1e-3 * log_std.pow(2).mean()
            actor_loss += mean_loss + std_loss
            self.VNet.optimizer.zero_grad()
            v_loss.backward()
            self.VNet.optimizer.step()
            self.QNet.optimizer.zero_grad()
            q_loss.backward()
            self.QNet.optimizer.step()
            self.policyNet.optimizer.zero_grad()
            actor_loss.backward()
            self.policyNet.optimizer.step()
            # Polyak-average the target value network (tau = 5e-3).
            for target_param, param in zip(self.VTar.parameters(), self.VNet.parameters()):
                target_param.data.copy_(target_param.data * (1.0 - 5*1e-3) + param.data * 5*1e-3)
            self.totalSteps += 1
|
[
"[email protected]"
] | |
c7c9ab5555c62ef7bca526ca8069b83788f07dc4
|
a3e26112cb5d6b64c30b44f775750653a1daf0dc
|
/Q910_Smallest-Range-II.py
|
550d96ea406915f5883d2b9d5cea91a4561727d1
|
[
"MIT"
] |
permissive
|
xiaosean/leetcode_python
|
938f1df379b518d99a778e2da8093ff0371e35d4
|
d6fc52d13946895d2b2928ef9962af0610b1d1e8
|
refs/heads/master
| 2023-04-05T07:07:01.561010 | 2023-03-25T19:17:21 | 2023-03-25T19:17:21 | 150,637,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
class Solution:
    def smallestRangeII(self, A: List[int], K: int) -> int:
        """Each element must be shifted by +K or -K; return the smallest
        achievable difference between the new maximum and new minimum.

        After sorting, an optimal assignment raises some prefix A[:i+1] by K
        and lowers the remaining suffix by K, so it suffices to try every
        split point. (Sorts A in place, like the original.)
        """
        A.sort()
        best = A[-1] - A[0]      # baseline: shift everything the same way
        suffix_top = A[-1] - K   # max of the lowered suffix
        prefix_bottom = A[0] + K  # min of the raised prefix
        for split in range(len(A) - 1):
            high = max(suffix_top, A[split] + K)
            low = min(prefix_bottom, A[split + 1] - K)
            best = min(best, high - low)
        return best
|
[
"[email protected]"
] | |
855fb15b15d33fbe562973352dba115c1014db55
|
251e8bfec0bfc5b6094f7db8ee6bdfe1ca7f6a5b
|
/bookmanager/venv/bin/python-config
|
759cfeb2748510449a11b8162c1a3830533ca6fc
|
[] |
no_license
|
googleliyang/Django_meiduo
|
543042e08cc5eeb1dce8432b4ea2cca996f35c06
|
46f48ecf7bd6e9e2796eac1c3d54787f5571a9a7
|
refs/heads/master
| 2020-04-24T19:22:42.295324 | 2019-02-28T05:24:23 | 2019-02-28T05:24:23 | 172,209,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,362 |
#!/Users/ly/Programmer/django/bookmanager/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Print the accepted --options to stderr and exit with `code`."""
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"[email protected]"
] | ||
bbd06e970f33e0fd3225569ff5aedc8b24bb6c63
|
8b9e9de996cedd31561c14238fe655c202692c39
|
/recursion/Tail_Recursion.py
|
24b88b9901ef2a299306341c11b8f90bb3107b39
|
[] |
no_license
|
monkeylyf/interviewjam
|
0049bc1d79e6ae88ca6d746b05d07b9e65bc9983
|
33c623f226981942780751554f0593f2c71cf458
|
refs/heads/master
| 2021-07-20T18:25:37.537856 | 2021-02-19T03:26:16 | 2021-02-19T03:26:16 | 6,741,986 | 59 | 31 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,005 |
py
|
# Explain what is tail recursion and implement reverse a list using functional programming style
def rev(a):
    """Reverse a list via an accumulator-passing (tail-recursive) helper.

    rev([0, 1, 2, 3])
    -> step([], [0, 1, 2, 3])
    -> step([0], [1, 2, 3])
    -> step([1, 0], [2, 3])
    -> step([2, 1, 0], [3])
    -> step([3, 2, 1, 0], [])
    -> [3, 2, 1, 0]
    """
    def step(acc, rest):
        # Prepend rest[0] to the accumulator so the result comes out
        # reversed. (CPython performs no tail-call elimination, so the call
        # stack still grows linearly with len(a).)
        if not rest:
            return acc
        return step([rest[0]] + acc, rest[1:])
    return step([], a)
def re(a):
    """Reverse a list with non-tail recursion.

    The `+ [a[0]]` happens AFTER the recursive call returns, so every stack
    frame keeps pending work:

    re([0, 1, 2, 3])
    (((re([]) + [3]) + [2]) + [1]) + [0]
    -> [3, 2, 1, 0]
    """
    if not a:
        return []
    return re(a[1:]) + [a[0]]
# NOTE(review): Python 2 print statements below — the demo entry point will
# not run under Python 3, though rev()/re() above are version-neutral.
def main():
    # n = 500 stays below CPython's default recursion limit (~1000).
    n = 500
    # Test case
    print rev(range(n))
    print re(range(n))
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
d38a64980bb07c06db2cda09547eb0528b777da0
|
e206cc00299804ce2271eb5d1513620e44ee9a9b
|
/course1-algorithm-toolbox/assignments/assignment_003_quick_sort3_way_partrition/sorting.py
|
7c0e0b028b58859d54188ef65c487733d890039d
|
[] |
no_license
|
dmitri-mamrukov/coursera-data-structures-and-algorithms
|
15459cd160f7bbae5464bf53d995bca868a0b415
|
01dd6f0dadf62a520bcafafddf7bf2b79e8e2603
|
refs/heads/master
| 2020-05-24T18:27:00.665642 | 2019-05-21T20:45:37 | 2019-05-21T20:45:37 | 187,410,737 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,537 |
py
|
#!/usr/bin/python3
import sys
import random
def __partition3(data, left, right):
    """This function partitions a[] in three parts:
    a) a[left..l - 1] contains all elements smaller than the pivot element
    b) a[l..r] contains all occurrences of the pivot element
    c) a[r + 1..right] contains all elements greater than the pivot element
    """
    l = left
    r = right
    k = left + 1
    pivot_value = data[left]
    # Dutch-national-flag sweep: k scans the unclassified region while l and
    # r bound the growing "< pivot" and "> pivot" regions.
    while k <= r:
        if data[k] < pivot_value:
            data[l], data[k] = data[k], data[l]
            l += 1
            k += 1
        elif data[k] > pivot_value:
            data[k], data[r] = data[r], data[k]
            # k is NOT advanced: the element swapped in from the right is
            # still unclassified.
            r -= 1
        else:
            k += 1
    # Last index of the "< pivot" region and first index of the "> pivot"
    # region — the caller recurses on [left..l-1] and [r+1..right].
    return (l - 1, r + 1)
def __partition2(data, left, right):
    """Two-way (Lomuto) partition around data[left]; returns the pivot's
    final index.

    NOTE(review): unused — solve() goes through __partition3; kept for
    reference.
    """
    x = data[left]
    k = left;
    for i in range(left + 1, right + 1):
        if data[i] <= x:
            k += 1
            data[i], data[k] = data[k], data[i]
    data[left], data[k] = data[k], data[left]
    return k
def __randomized_quick_sort(data, left, right):
    """Sort data[left..right] in place. The random pivot guards against the
    quadratic worst case on sorted inputs; 3-way partitioning handles many
    duplicate keys efficiently."""
    if left >= right:
        return
    k = random.randint(left, right)
    # Move the chosen pivot to the front, where __partition3 expects it.
    data[left], data[k] = data[k], data[left]
    i, j = __partition3(data, left, right)
    __randomized_quick_sort(data, left, i);
    __randomized_quick_sort(data, j, right);
def solve(data):
    """Sort `data` in place via randomized 3-way quicksort."""
    __randomized_quick_sort(data, 0, len(data) - 1)
if __name__ == '__main__':
    # Input format on stdin: first integer is the count, the rest are the
    # elements. NOTE(review): `input` shadows the builtin of the same name.
    input = sys.stdin.read()
    n, *a = list(map(int, input.split()))
    solve(a)
    for x in a:
        print(x, end = ' ')
    print()
|
[
"[email protected]"
] | |
7f9e6d7b2d645fcd5aa6bd33457e423a8acbaae7
|
485784cea86f52c2acda0a495942689104cd391e
|
/schedule/migrations/0002_rinkschedule_schedule_date.py
|
9b86d692f4df4d3b153c2be9115884978a11c438
|
[] |
no_license
|
BrianC68/OIC_Web_Apps
|
50ec9f46868ad37dc809548d2d362a4573320539
|
e75b9439b11cf2325675d76dacac38806156fb16
|
refs/heads/master
| 2023-08-09T07:39:33.066601 | 2023-08-07T13:22:39 | 2023-08-07T13:22:39 | 197,438,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
# Generated by Django 2.2.1 on 2019-10-08 23:50
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds RinkSchedule.schedule_date. The temporary default (timezone.now)
    # only back-fills existing rows at migration time; preserve_default=False
    # removes it from the model definition afterwards.
    dependencies = [
        ('schedule', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='rinkschedule',
            name='schedule_date',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
a5ce8e3808360b1d98652c7e2b7c2658debc42d3
|
559f3dec0964d2e0f86c6c871371fe779cf3726c
|
/contrib/MedicalSeg/tools/preprocess_utils/dataset_json.py
|
5b3372963ae365689314ba2a9ae2d83d7a9307a9
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSeg
|
319ab26665ea492527a1949671650135123ffc39
|
2c8c35a8949fef74599f5ec557d340a14415f20d
|
refs/heads/release/2.8
| 2023-08-31T09:08:06.724717 | 2023-08-18T01:59:56 | 2023-08-18T01:59:56 | 204,380,779 | 8,531 | 1,866 |
Apache-2.0
| 2023-09-12T02:30:42 | 2019-08-26T02:32:22 |
Python
|
UTF-8
|
Python
| false | false | 478 |
py
|
import json
def parse_msd_basic_info(json_path):
    """
    get dataset basic info from msd dataset.json

    Parameters
    ----------
    json_path : str
        Path to a Medical Segmentation Decathlon ``dataset.json``.

    Returns
    -------
    dict with keys: modalities, labels, dataset_name, dataset_description,
    license_desc, dataset_reference. Raises KeyError if a required field is
    missing from the JSON.
    """
    # BUG FIX: the previous open(...).read() never closed its file handle;
    # a context manager releases the descriptor deterministically. Also use
    # json.load on the file object and avoid shadowing the `dict` builtin.
    with open(json_path, "r") as f:
        meta = json.load(f)
    info = {}
    info["modalities"] = tuple(meta["modality"].values())
    info["labels"] = meta["labels"]
    info["dataset_name"] = meta["name"]
    info["dataset_description"] = meta["description"]
    info["license_desc"] = meta["licence"]
    info["dataset_reference"] = meta["reference"]
    return info
|
[
"[email protected]"
] | |
cc30e3e61bd5839e2a0450e37255e918cb0b15cc
|
814e4ad96172a76d9b72ac35697553980d0db5f1
|
/pyalp/gs_interface/generate_certificates.py
|
e7aa246d4bb2851366daaf5f91a5fe555ce9c5c2
|
[
"MIT"
] |
permissive
|
Mause/pyalp
|
29785037d3b4ebc2822e6ec74621aa65f621bb8e
|
fb0f723070e11f8c9ed57e2475eb963599f442a6
|
refs/heads/master
| 2022-12-05T12:05:01.804305 | 2014-07-02T13:04:21 | 2014-07-02T13:04:21 | 15,419,434 | 0 | 0 |
MIT
| 2022-11-22T00:24:05 | 2013-12-24T14:00:26 |
PHP
|
UTF-8
|
Python
| false | false | 692 |
py
|
#!/usr/bin/env python
"""
Generate client and server CURVE certificate files then move them into the
appropriate store directory, private_keys or public_keys. The certificates
generated by this script are used by the stonehouse and ironhouse examples.
In practice this would be done by hand or some out-of-band process.
Author: Chris Laws
"""
import zmq.auth
from __init__ import KEYS_DIR
def generate_certificates():
    """Create CURVE certificate files for the server and the client.

    For each identity, zmq.auth.create_certificates writes a
    <name>.key / <name>.key_secret pair into KEYS_DIR.
    """
    for identity in ("server", "client"):
        zmq.auth.create_certificates(KEYS_DIR, identity)


if __name__ == '__main__':
    generate_certificates()
|
[
"[email protected]"
] | |
ee63b12e238a4138f9963f331b11ffc93c1e0fa0
|
5979cf3c79daa04706c8fef1595574c6e36c14a1
|
/vgl/home/urls.py
|
0547341057b158de3626eb131277cda6b03c92a1
|
[] |
no_license
|
rahuezo/valley-green-landscape-inc
|
f675b5242ed7a80e457b236a253fb9ed0602829c
|
6dac5ed2202336a69a86c6dcafee892cbadaa5b3
|
refs/heads/master
| 2021-08-15T16:48:50.594629 | 2017-11-18T00:08:49 | 2017-11-18T00:08:49 | 110,613,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
from django.conf.urls import url
from . import views
# URL namespace: routes here are reversed as 'home:index'.
app_name = 'home'
urlpatterns = [
    # Site root -> the home index view.
    url(r'^$', views.index, name='index'),
]
|
[
"[email protected]"
] | |
dc89107f7dcfdfa9cd7401d4281ed7ea790232a3
|
0ad7f553df6b210b5ac004fbf490ed651a21d55e
|
/algos/discrete_esay_control_lib_01.py
|
ef19dd246fb029fd2da77e0c6b9a839eebbfc2a8
|
[] |
no_license
|
MarianoDel/spyder_python
|
fa00987eb9aa1ef61d7224679a84c05a217c6c35
|
5f5896df68f95eb860bc08c21ae2b19516432cdc
|
refs/heads/master
| 2020-05-23T06:14:57.329478 | 2020-04-23T14:58:16 | 2020-04-23T14:58:16 | 84,753,428 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,354 |
py
|
# -*- coding: utf-8 -*-
# Using the control/signal library to inspect a discrete moving-average filter.
import numpy as np
from scipy import signal

# 8-tap moving-average FIR: all coefficients 1/8.
b = [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125]

tf1 = (b, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 0.001)  # make sure dt matches the discrete-time step
#w, h = signal.freqz(b)
#w, h = signal.freqz(tf1)
# NOTE(review): signal.freqz expects separate b/a coefficient arrays, not a
# (b, a, dt) transfer-function tuple — this call looks wrong; the
# commented-out signal.freqz(b) above is the usual form. TODO confirm.
w, h = signal.freqz((b, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 0.001))

import matplotlib.pyplot as plt

# Bode-style plot: magnitude (dB) and unwrapped phase on twin y-axes.
fig = plt.figure()
plt.title('Digital filter frequency response')
ax1 = fig.add_subplot(111)
plt.plot(w, 20 * np.log10(abs(h)), 'b')
plt.ylabel('Amplitude [dB]', color='b')
plt.xlabel('Frequency [rad/sample]')
ax2 = ax1.twinx()
angles = np.unwrap(np.angle(h))
plt.plot(w, angles, 'g')
plt.ylabel('Angle (radians)', color='g')
plt.grid()
plt.axis('tight')
plt.show()

plt.figure(2)
plt.clf()

tf = (b, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 0.001)  # make sure dt matches the discrete-time step
# also give it a multiple pole at the origin so it does not complain that num > den

t_in = np.arange(0.0, 0.1, 0.001)
#t_in = np.arange(0.0, 4.0, 1.0)
#u = np.asarray([0.0, 0.0, 1.0, 1.0])
# Unit-step input over the whole time vector.
u = np.ones(np.size(t_in))

# Discrete-time step response of the filter.
t_out, y = signal.dlsim(tf, u, t=t_in)

plt.plot(t_out, y, 'b')
plt.plot(t_out, u+0.1, 'g')
plt.show()
|
[
"[email protected]"
] | |
67f12d8933ae63eef4aa93f09cc44f61d8f48c3d
|
7801b0356b60de5a4fa6b214717a1c04942b5b62
|
/crm/migrations/0003_userinfo_user.py
|
2daf274b45a19a80d357f35f9323cbef54a43799
|
[] |
no_license
|
hqs2212586/CRM_demo
|
365652c61c991a2098d32b5db318d55cf29baa0b
|
941a896aef598d81750a96074bc63ccfaaadf0a5
|
refs/heads/master
| 2020-03-27T17:43:40.110992 | 2018-08-31T09:20:01 | 2018-08-31T09:20:01 | 146,869,963 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 517 |
py
|
# Generated by Django 2.0.6 on 2018-08-31 03:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds a nullable one-to-one link from
    crm.UserInfo to rbac.User."""

    dependencies = [
        ('rbac', '0004_permission_action'),
        ('crm', '0002_customerdistrbute'),
    ]

    operations = [
        migrations.AddField(
            model_name='userinfo',
            name='user',
            # null=True so existing UserInfo rows survive the schema change.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='rbac.User'),
        ),
    ]
|
[
"[email protected]"
] | |
2a02caa7558f764522bd58b00871216e796676d8
|
d42dea822871be6027fadbf8b167be1c0b38d9c7
|
/BST/debug.py
|
2cc73f8d9784aa9f8a21c2b7530ff531d0bb1e4b
|
[] |
no_license
|
siddhantprateek/Python-in-Practice
|
d8412c46dec57d512d8abd87cb0a33b71070c5ee
|
0ad806f02fecb87de20078ef956f8e23bb38e342
|
refs/heads/main
| 2023-06-26T22:34:55.172882 | 2021-07-29T15:14:09 | 2021-07-29T15:14:09 | 354,875,226 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,479 |
py
|
class BSTreeNode:
    """A single tree node holding a value and two child links."""

    def __init__(self, val):
        self.value = val
        # Children start out empty; BinaryST.insertHelp fills them in.
        self.left, self.right = None, None


class BinaryST:
    """A binary search tree supporting insert, sum and pretty-printing.

    Duplicate values are silently ignored on insert.
    """

    def __init__(self):
        self.root = None

    def insert(self, val):
        """Insert val while preserving the BST ordering invariant."""
        self.root = self.insertHelp(val, self.root)

    def insertHelp(self, value, node):
        # Empty slot reached: this is where the new value belongs.
        if node is None:
            return BSTreeNode(value)
        if value < node.value:
            node.left = self.insertHelp(value, node.left)
        elif value > node.value:
            node.right = self.insertHelp(value, node.right)
        # Equal values fall through untouched (duplicates ignored).
        return node

    def Sum(self):
        """Return the sum of every value stored in the tree."""
        return self.sumHelp(self.root)

    def sumHelp(self, node):
        if node is None:
            return 0
        return self.sumHelp(node.left) + node.value + self.sumHelp(node.right)

    def display(self):
        """Print the tree via pre-order traversal, one node per line."""
        self.displayHelper(self.root, "Root Node: ")

    def displayHelper(self, node, details):
        if node is None:
            return
        print(details, node.value)
        label = str(node.value)
        self.displayHelper(node.left, "left child of " + label + ":")
        self.displayHelper(node.right, "right child of " + label + ":")
# nums = [4, 5, 2, 7, 6, 1]
if __name__ == '__main__':
    # Demo: build a small tree and print its structure.
    bst = BinaryST()
    bst.insert(4)
    bst.insert(5)
    bst.insert(2)
    bst.insert(7)
    bst.insert(6)
    bst.display()
|
[
"[email protected]"
] | |
5ae9b28df851a85fea96edf6169e6cf8f14c6a50
|
07f92805a75dc91b8be2ac14c238394245eda9ea
|
/Python生物信息学数据管理/python-for-biologists/03-modular_programming/10-functions/calc_atom_atom_distance.py
|
5156d4af63c5626cef0355e63c950a6aecc07d18
|
[] |
no_license
|
08zhangyi/Some-thing-interesting-for-me
|
6ea7366ef1f0812397300259b2e9d0e7217bcba0
|
f4cbda341ada98753c57a3ba07653163522dd023
|
refs/heads/master
| 2023-01-11T22:54:03.396911 | 2023-01-06T05:47:41 | 2023-01-06T05:47:41 | 136,426,995 | 7 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
'''
Find two alpha-C atoms in a PDB structure and calculate their distance.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 10.4.4 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
from math import sqrt
from distance import calc_dist
from parse_pdb import parse_atom_line
# Scan the PDB file line by line until both target CA atoms are found.
pdb = open('3G5U.pdb')
points = []
while len(points) < 2:
    line = pdb.readline()
    if line.startswith("ATOM"):
        chain, res_type, res_num, atom, x, y, z = parse_atom_line(line)
        # Alpha carbon of residue 123 in chain A.
        if res_num == '123' and chain == 'A' and atom == 'CA':
            points.append((x, y, z))
        # Alpha carbon of residue 209 in chain A.
        if res_num == '209' and chain == 'A' and atom == 'CA':
            points.append((x, y, z))

# Euclidean distance between the two alpha carbons (Python 2 print).
print calc_dist(points[0], points[1])
|
[
"[email protected]"
] | |
21393f5ec3107ae718cce881c013ad295cbc9e74
|
bf74f773f0c69e0ce7c5cc57a5897ca86cca6587
|
/custom_collections/tree.py
|
b2452b64db079d1947db82cf94a240f15c822c36
|
[
"BSD-3-Clause"
] |
permissive
|
weijia/custom_collections
|
a532b01b18049f0e0aad9920f8e90d45e3c24812
|
e9b7bcc25f83f6a9adfbee94c825835414799aab
|
refs/heads/master
| 2016-09-06T09:01:05.969014 | 2014-09-20T17:42:48 | 2014-09-20T17:42:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,068 |
py
|
import uuid
from django.utils import timezone
import django.db.utils
from django.contrib.auth.models import User, Group
from django.contrib import admin
from django.conf import settings
from obj_sys.models import UfsObj
from guardian.admin import GuardedModelAdmin
from guardian.shortcuts import assign_perm
try:
from models import CollectionItem
except:
pass
# UUID of the root collection item; all registered menu entries hang off it.
gRootUuid = u"4a5e8673-f2a2-4cf2-af6c-461fa9f31a15"


def register(objectClass, group_name = "scheduling"):
    """Register an admin "add" URL for objectClass as a collection item under
    the root, and grant view permission to group_name (created if missing).
    Idempotent: returns early if the URL is already registered."""
    module_name = objectClass.__module__.split(".")[0].lower()
    class_name = objectClass.__name__.lower()
    url = u"view://admin/%s/%s/add"%(module_name, class_name)
    try:
        # Already registered: nothing to do.
        for i in UfsObj.objects.filter(ufs_url = url):
            return
    except django.db.utils.DatabaseError:
        #Database is not created yet, just return, items will be created after syncdb is executed
        return
    o = UfsObj(ufs_url = url, uuid = unicode(uuid.uuid4()), timestamp=timezone.now(), user=User.objects.filter(username="AnonymousUser")[0])
    o.save()
    c = CollectionItem(obj = o, uuid = gRootUuid, id_in_col="%s_%s_add"%(module_name, class_name),
                       timestamp=timezone.now(), user=User.objects.filter(username="AnonymousUser")[0])
    c.save()
    #Add to group
    try:
        group = Group.objects.filter(name=group_name)[0]
    except:
        #Group not exist, create it
        group = Group.objects.create(name=group_name)
    #print 'assigning: ', group, c
    assign_perm('view_collection_item', group, c)
def get_item_id(parent_path):
    """Resolve a "/a/b/c" style path of collection items, starting from the
    root collection, and return the uuid of the last item's object."""
    current_uuid = gRootUuid
    for segment in parent_path.split("/"):
        # Leading/trailing slashes yield empty segments; skip them.
        if segment == "":
            continue
        current_uuid = CollectionItem.objects.filter(uuid=current_uuid, id_in_col=segment)[0].obj.uuid
    return current_uuid
def register_menu(subitem_url, subitem_text, parent_path = "/", permmited_group = None):
    """
    Register a menu item in the left tree in object manager; the info is
    stored in obj_sys.models.Collection.
    If subitem_text contains "dynamic", subitem_url is not used; otherwise
    subitem_url is the content of this menu item.
    :param subitem_url: menu item's URL. When the item is clicked, the URL will be loaded to the content pane
    :param subitem_text: menu item's text. It is stored in the id_in_col field for Collection and if it is
                        "dynamic://xxxx", the parent item's children will be dynamically generated by opening
                        URL: xxxx. xxxx should return a collection of items as in tags.tag_list. The format is
                        described in tags.tag_list as well.
    :param parent_path: the parent for this menu item. Root item is "/", sub menus should start with "/" as well.
    :param permmited_group: name of the group allowed to view this item (parameter spelling kept
                            for backward compatibility); None grants anonymous access.
    :return: N/A
    """
    try:
        root_uuid = get_item_id(parent_path)
        url = u"view://%s"%(subitem_url)
        qs = UfsObj.objects.filter(ufs_url = url)
        if 0 == qs.count():
            print 'creating new ufs obj'
            o = UfsObj(ufs_url = url, uuid = unicode(uuid.uuid4()), timestamp=timezone.now(), user=User.objects.filter(username="AnonymousUser")[0])
            o.save()
        else:
            #print 'use existing item'
            o = qs[0]
    except django.db.utils.DatabaseError:
        #Database is not created yet, just return, items will be created after syncdb is executed
        return
    #print 'creating collection item for root: ', root_uuid
    if permmited_group is None:
        #If no permission requested, set anonymous user accessable.
        permitted_user_or_group = User.objects.filter(pk=settings.ANONYMOUS_USER_ID)[0]
    else:
        try:
            permitted_user_or_group = Group.objects.filter(name = permmited_group)[0]
        except:
            #Group not exist, create it
            permitted_user_or_group = Group.objects.create(name = permmited_group)
    # Reuse the existing collection item if one already exists for this text.
    collqs = CollectionItem.objects.filter(uuid = root_uuid, id_in_col = subitem_text)
    if 0 == collqs.count():
        c = CollectionItem(obj = o, uuid = root_uuid, id_in_col = subitem_text,
                           timestamp=timezone.now(), user=User.objects.filter(username="AnonymousUser")[0])
        c.save()
    else:
        c = collqs[0]
    #Assign group permission
    assign_perm('view_collection_item', permitted_user_or_group, c)
def register_to_sys(class_inst, admin_class = None):
    """Register a model class with both the default admin site and, when
    available, the normal_admin user admin site."""
    # Default to a guardian-aware admin class named "<Model>Admin".
    if admin_class is None:
        admin_class = type(class_inst.__name__+"Admin", (GuardedModelAdmin, ), {})
    try:
        admin.site.register(class_inst, admin_class)
    except:
        # NOTE(review): broad except presumably swallows AlreadyRegistered —
        # it also hides any other error; confirm this is intentional.
        pass
    try:
        from normal_admin.admin import user_admin_site
        user_admin_site.register(class_inst, admin_class)
    except:
        # normal_admin is optional; ignore if it is not installed.
        pass
    #register(class_inst)
def register_all(class_list):
    """Register every model class in class_list with the admin sites."""
    for model_class in class_list:
        register_to_sys(model_class)
|
[
"[email protected]"
] | |
43589f610d031b46faaa40ca7ed51622d5c8345d
|
8c9c27cb88a2d210a5e2fb5803fe89204dba95ef
|
/phy/gui/qt.py
|
fee9e2549519dba466c131d6a190d3459af0496c
|
[] |
no_license
|
arnefmeyer/phy
|
c13b1eceb70ee72cf0ff9c4a273e195f122fabc4
|
14663e1f2baad421d6bc9f420d34170c6c969bbe
|
refs/heads/master
| 2020-12-07T15:42:49.605432 | 2016-04-20T21:10:38 | 2016-04-20T21:10:38 | 56,718,986 | 1 | 0 | null | 2016-04-20T20:32:18 | 2016-04-20T20:32:18 | null |
UTF-8
|
Python
| false | false | 4,107 |
py
|
# -*- coding: utf-8 -*-
"""Qt utilities."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from contextlib import contextmanager
from functools import wraps
import logging
import sys
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# PyQt import
# -----------------------------------------------------------------------------
from PyQt4.QtCore import (Qt, QByteArray, QMetaObject, QObject, # noqa
QVariant, QEventLoop, QTimer,
pyqtSignal, pyqtSlot, QSize, QUrl)
try:
from PyQt4.QtCore import QPyNullVariant # noqa
except: # pragma: no cover
QPyNullVariant = None
try:
from PyQt4.QtCore import QString # noqa
except: # pragma: no cover
QString = None
from PyQt4.QtGui import (QKeySequence, QAction, QStatusBar, # noqa
QMainWindow, QDockWidget, QWidget,
QMessageBox, QApplication, QMenuBar,
QInputDialog,
)
from PyQt4.QtWebKit import QWebView, QWebPage, QWebSettings # noqa
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def _button_enum_from_name(name):
    """Map a lowercase button name (e.g. 'yes') to its QMessageBox enum."""
    attr = name.capitalize()
    return getattr(QMessageBox, attr)
def _button_name_from_enum(enum):
    """Inverse of _button_enum_from_name: return the lowercase name of a
    QMessageBox button enum, or None if no attribute matches."""
    for attr in dir(QMessageBox):
        if getattr(QMessageBox, attr) == enum:
            return attr.lower()
def _prompt(message, buttons=('yes', 'no'), title='Question'):
    """Build (without showing) a modal QMessageBox with the given buttons.

    `buttons` are lowercase names such as 'yes'/'no'; the first one becomes
    the default button. Returns the configured QMessageBox instance.
    """
    named = [(name, _button_enum_from_name(name)) for name in buttons]
    # Standard buttons are a bitmask; OR all requested enums together.
    mask = 0
    for _name, enum in named:
        mask |= enum
    box = QMessageBox()
    box.setWindowTitle(title)
    box.setText(message)
    box.setStandardButtons(mask)
    box.setDefaultButton(named[0][1])
    return box
def _show_box(box):  # pragma: no cover
    # Run the dialog modally and translate the clicked button back to its
    # lowercase name (e.g. 'yes').
    return _button_name_from_enum(box.exec_())


def _input_dialog(title, sentence):
    # Thin wrapper: modal text prompt returning (text, ok_pressed).
    return QInputDialog.getText(None, title, sentence)
@contextmanager
def _wait_signal(signal, timeout=None):
    """Block loop until signal emitted, or timeout (ms) elapses.

    Usage: the with-body triggers the signal; on exit a local event loop
    runs until the signal fires (or the timeout quits it).
    """
    # http://jdreaver.com/posts/2014-07-03-waiting-for-signals-pyside-pyqt.html
    loop = QEventLoop()
    # Connect before yielding so a signal emitted inside the with-body
    # is not missed.
    signal.connect(loop.quit)

    yield

    if timeout is not None:
        # Safety net: quit the local loop even if the signal never fires.
        QTimer.singleShot(timeout, loop.quit)
    loop.exec_()
# -----------------------------------------------------------------------------
# Qt app
# -----------------------------------------------------------------------------
def require_qt(func):
    """Specify that a function requires a Qt application.

    Use this decorator to specify that a function needs a running
    Qt application before it can run. An error is raised if that is not
    the case.
    """
    @wraps(func)
    def checked(*args, **kwargs):
        if not QApplication.instance():  # pragma: no cover
            raise RuntimeError("A Qt application must be created.")
        return func(*args, **kwargs)
    return checked
# Global variable with the current Qt application.
QT_APP = None


def create_app():
    """Create a Qt application.

    Reuses the process-wide QApplication if one already exists; otherwise
    creates one from sys.argv. Stores the result in the QT_APP global.
    """
    global QT_APP
    QT_APP = QApplication.instance()
    if QT_APP is None:  # pragma: no cover
        QT_APP = QApplication(sys.argv)
    return QT_APP
@require_qt
def run_app():  # pragma: no cover
    """Run the Qt application.

    Blocks in the Qt event loop until quit, then propagates its exit code.
    """
    global QT_APP
    return QT_APP.exit(QT_APP.exec_())
# -----------------------------------------------------------------------------
# Testing utilities
# -----------------------------------------------------------------------------
def _debug_trace():  # pragma: no cover
    """Set a tracepoint in the Python debugger that works with Qt."""
    from PyQt4.QtCore import pyqtRemoveInputHook
    from pdb import set_trace
    # Qt's input hook must be removed or pdb's prompt fights the event loop.
    pyqtRemoveInputHook()
    set_trace()
|
[
"[email protected]"
] | |
7c6e0f6234acef62dcc182e1e93468181f99ce5c
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/apigateway/apis/DescribeIsDeployApiGroupsRequest.py
|
5734f9aeb75d8ebb2d719eb77989012b428bf204
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 |
Apache-2.0
| 2023-09-07T06:54:49 | 2018-03-22T03:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,460 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeIsDeployApiGroupsRequest(JDCloudRequest):
    """
    Query API groups (by deployment status).
    """

    def __init__(self, parameters, header=None, version="v1"):
        # GET /regions/{regionId}/apiGroups:isDeploy
        super(DescribeIsDeployApiGroupsRequest, self).__init__(
            '/regions/{regionId}/apiGroups:isDeploy', 'GET', header, version)
        self.parameters = parameters
class DescribeIsDeployApiGroupsParameters(object):
    """Parameter holder for DescribeIsDeployApiGroupsRequest."""

    def __init__(self, regionId, ):
        """
        :param regionId: region ID
        """
        self.regionId = regionId
        # Optional filters; left unset until setFilters is called.
        self.filters = None

    def setFilters(self, filters):
        """
        :param filters: (Optional) deployStatus - deployment status; deployed: 1, not deployed: 0
        """
        self.filters = filters
|
[
"[email protected]"
] | |
8b7641767d7456a30a42aaefeb9cee8c4c607de4
|
51888119e10cdff12dafb060a54824632edccf3f
|
/Folders/Python/BlackSailSubmit.py
|
fb3aabf18c996ca4a44e2ffda207c3e9e2ed6b01
|
[
"BSD-2-Clause"
] |
permissive
|
kuchinal/lamakaha
|
b64511ad8c6d2b36da5a84a266b9e7a69acd3106
|
24e3b2ff53bcac2ad1c0e5a3b9afd4593d85f22d
|
refs/heads/master
| 2023-09-01T17:55:56.551183 | 2023-07-31T19:32:04 | 2023-07-31T19:32:04 | 182,849,747 | 0 | 0 | null | 2021-09-10T06:34:22 | 2019-04-22T19:00:02 |
Python
|
UTF-8
|
Python
| false | false | 519 |
py
|
import nuke
import rrSubmit_Nuke_5
def BlackSailSubmit():
    """Enable only the selected Write/AutoWrite nodes (disabling the rest),
    then submit the script via rrSubmit_Nuke_5. If no node is selected,
    submit with all nodes untouched."""
    try :
        # Raises if nothing is selected, which routes us to the except
        # branch below. NOTE(review): `g` itself is never used afterwards.
        g = nuke.selectedNode()
        f = nuke.allNodes("Write")
        f= nuke.allNodes("AutoWrite")+f
        for a in f:
            sel = a['selected'].value()
            if sel == 1:
                # Selected node: make sure it renders.
                a['disable'].setValue(0)
            else:
                # Unselected node: disable it for this submission.
                a['disable'].setValue(1)
        print "selected"
        rrSubmit_Nuke_5.rrSubmit_Nuke_5()
    except:
        # No selection (or any other error): submit everything as-is.
        rrSubmit_Nuke_5.rrSubmit_Nuke_5()
        print "all"
|
[
"[email protected]"
] | |
6c8a442acb14c856d7f761064e44561c82b10f6c
|
212028581b4875ac2fefa9acd7b17b88b4b8eccd
|
/ulugugu/values.py
|
e5fbdd042dbd0dbd5cebbe5d437a2fa3c34403d1
|
[] |
no_license
|
jonashaag/ulugugu
|
65a3b55c2fa2d624ba7cc72cc5186eb353e7b016
|
509e3ceadbb50aad34c585b63d33284357a21ed6
|
refs/heads/master
| 2016-08-06T07:10:53.578924 | 2015-07-25T12:54:15 | 2015-07-25T12:54:15 | 37,680,621 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
class Value:
    """Abstract base class for all value node types."""
    pass


class Integer(Value):
    """An integer value node."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '<Integer {:d}>'.format(self.value)


class String(Value):
    """A string value node."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '<String {!r}>'.format(self.value)


class Application(Value):
    """Application of a binary operation to two operands."""

    def __init__(self, operation, op1, op2):
        self.operation = operation
        self.op1 = op1
        self.op2 = op2

    def __repr__(self):
        return '<Application {}({}, {})>'.format(self.operation, self.op1, self.op2)
|
[
"[email protected]"
] | |
42fdb67ff09320fd1ae7ed48b1d7252395a78674
|
93db4b48741ff4ab0a3895813a6c7543e01821ea
|
/leetcode/Python/875_bananas.py
|
ce09bb5605a8310b0d9dfe00aaffbe27d27ed1c7
|
[] |
no_license
|
shubham14/Coding_Contest_solutions
|
f884c458d3316bdafc6f1b1a52cf3e962c58bc47
|
1b67497f35b892c25e3d9600214fa37a738ffd40
|
refs/heads/master
| 2021-06-22T13:34:10.581101 | 2019-10-09T02:56:01 | 2019-10-09T02:56:01 | 131,326,516 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 440 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 00:09:31 2018
@author: Shubham
"""
class Solution(object):
    def minEatingSpeed(self, piles, H):
        """Return the minimum integer eating speed K such that every banana
        pile can be finished within H hours (LeetCode 875).

        Binary-search K over [1, max(piles)]: hours needed at speed K is
        sum(ceil(p / K)), which is non-increasing in K.

        Fix: the original used `/`, which is true division on Python 3 —
        `possible` then compared float hour counts and the midpoint became a
        float, breaking the search. `//` is identical for positive ints on
        Python 2 and correct on Python 3.
        """
        def possible(K):
            # ceil(p / K) == (p - 1) // K + 1 for positive integers.
            return sum((p - 1) // K + 1 for p in piles) <= H

        s, e = 1, max(piles)
        while s < e:
            m = (s + e) // 2
            if not possible(m):
                s = m + 1
            else:
                e = m
        return s
|
[
"[email protected]"
] | |
3cce302430399686b13d6cc49040ace97eb052a2
|
632eee486e432d1bc2a7c771db7e9a06f7cad7a9
|
/2812-laercio.py
|
3d976d923a342bfca9597d558079aab8c6f0269b
|
[] |
no_license
|
L4res/URI-Python
|
d1c578d87201151540876a6b8eca2aecd833a953
|
2f59387ca38e16f6396a6ea677d71f7c2c919fc2
|
refs/heads/master
| 2023-03-25T17:34:57.635187 | 2020-06-15T18:20:03 | 2020-06-15T18:20:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 864 |
py
|
# URI 2812: for each test case, output the odd values alternating
# largest / smallest, separated by spaces.
for i in range(int(input())):
    n = int(input())
    lista = list(map(int, input().split()))
    lista.sort()
    impares = []
    resultado = []
    # Collect the odd values:
    for j in range(n):
        if lista[j] % 2 != 0:
            impares.append(lista[j])
    # Take the values alternating largest and smallest, successively:
    while len(impares) != 0:
        try:
            resultado.append(impares[len(impares)-1])
            impares.pop()
            resultado.append(impares[0])
            impares.pop(0)
        except IndexError:
            # Odd count of odd values: the middle element was already taken.
            break
    # Print the result:
    if len(resultado) == 0:
        print()
    else:
        for k in range(len(resultado)):
            if k != len(resultado)-1:
                print(resultado[k], end=" ")
            else:
                print(resultado[k])
|
[
"[email protected]"
] | |
aa611c1670449e133c290241f9c2bbbc38d1505a
|
ed21823488a1cca51009793efa0b124e40d224a4
|
/neurobioseg/170111_avoid_redundant_path_calculation/p170111_03_compute_paths.py
|
b61c0656c06859d665bca2fe34b87a67a2cc9716
|
[] |
no_license
|
jhennies/py_devel
|
4a41e13ec8cd9b834c3d5acf64becc0fa8ffc479
|
9fc860be95ae91064a40f25e26d4024fbae6eb1f
|
refs/heads/master
| 2021-01-16T23:25:56.716283 | 2017-03-10T17:49:55 | 2017-03-10T17:49:55 | 45,381,183 | 1 | 0 | null | 2017-03-10T17:49:56 | 2015-11-02T08:21:35 |
Python
|
UTF-8
|
Python
| false | false | 4,945 |
py
|
import os
import inspect
from hdf5_image_processing import Hdf5ImageProcessing as IP, Hdf5ImageProcessingLib as ipl
from hdf5_processing import RecursiveDict as rdict
from shutil import copy, copyfile
import numpy as np
import matplotlib.pyplot as plt
import processing_libip as libip
import sys
from yaml_parameters import YamlParams
__author__ = 'jhennies'
def load_images(filepath, skeys=None, recursive_search=False, logger=None):
    """Load image data (lazily, nodata=True) from an HDF5 file into an ipl.

    :param filepath: path of the HDF5 file to read
    :param skeys: optional subset of keys to load
    :param recursive_search: passed through to data_from_file
    :param logger: optional logger; falls back to print when None
    """
    if logger is not None:
        logger.logging('Loading data from \n{}', filepath)
        logger.logging('With skeys = {}', skeys)
    else:
        print 'Loading data from \n{}'.format(filepath)

    data = ipl()
    # nodata=True: only the structure is read here; actual arrays load later.
    data.data_from_file(
        filepath=filepath,
        skeys=skeys,
        recursive_search=recursive_search,
        nodata=True
    )

    return data
def simplify_statistics(statistics, iterations=3):
    """Return a copy of statistics with empty/zero leaves removed.

    Repeats the pruning `iterations` times so containers that become empty
    after a pass are themselves removed by the next pass.
    """
    newstats = statistics.dcp()

    for i in xrange(0, iterations):
        # Iterate the previous snapshot while popping from the copy, so the
        # structure being traversed is never mutated mid-iteration.
        for d, k, v, kl in statistics.data_iterator(yield_short_kl=True):
            if v == 0 or not v:
                newstats[kl].pop(k)
        statistics = newstats.dcp()

    return newstats
def compute_paths(yparams):
    """Run every path-computation experiment described in the yaml params.

    The 'compute_paths' parameter tree has three layers: experiments at the
    top, then truepaths/falsepaths per experiment, each with its own
    sources/params/target (merged over 'default' entries at every layer).
    Results are written to the per-experiment target file.
    """
    all_params = yparams.get_params()

    # Zero'th layer:
    # --------------
    zeroth = rdict(all_params['compute_paths'])
    if 'default' in zeroth:
        zeroth_defaults = zeroth.pop('default')
    else:
        zeroth_defaults = ipl()

    for exp_lbl, experiment in zeroth.iteritems():

        # First layer
        # -----------
        # An experiment is now selected and performed
        yparams.logging('Performing experiment {}\n==============================\n', exp_lbl)

        first = zeroth_defaults.dcp()
        first.merge(experiment)
        if 'default' in first:
            first_defaults = first.pop('default')
        else:
            first_defaults = ipl()

        statistics = rdict()

        for exp_class_lbl in ['truepaths', 'falsepaths']:

            # Final layer
            # -----------
            # The true or false paths for the current experiment are here computed, respectively
            yparams.logging('Computing {}...\n------------------------------\n', exp_class_lbl)
            final = first_defaults.dcp()
            final.merge(first[exp_class_lbl])

            exp_sources = final['sources']
            exp_params = final['params']
            exp_target = final['target']

            # Load the necessary images
            data=ipl()
            for datakey, content in exp_sources.iteritems():
                # content = (folder_param, file_param, {'skeys': ...})
                data[datakey] = load_images(
                    all_params[content[0]] + all_params[content[1]],
                    skeys=content[2]['skeys'],
                    recursive_search=False,
                    logger=yparams
                )

            yparams.logging('\nInitial datastructure: \n\n{}', data.datastructure2string(maxdepth=4))
            yparams.logging('experiment_params: \n{}', exp_params)

            # Compute the paths
            # -----------------
            paths = ipl()
            # truepaths are computed with for_class=True, falsepaths with False.
            for_class = False
            if exp_class_lbl == 'truepaths':
                for_class = True
            paths[exp_lbl][exp_class_lbl], statistics[exp_lbl][exp_class_lbl] = libip.compute_paths_for_class(
                data, 'segm', 'conts', 'dt', 'gt',
                exp_params, for_class=for_class, ignore=[], debug=all_params['debug'],
                logger=yparams
            )

            yparams.logging(
                '\nPaths datastructure after running {}: \n\n{}',
                exp_class_lbl,
                paths.datastructure2string()
            )

            # Identity helper: datastructure2string prints leaves unchanged.
            def val(x):
                return x
            yparams.logging(
                '\nStatistics after {}: \n\n{}', exp_class_lbl,
                simplify_statistics(statistics[exp_lbl]).datastructure2string(function=val)
            )

            # Save the result to disk
            # -----------------------
            targetfile = all_params[exp_target[0]] + all_params[exp_target[1]]
            paths.write(filepath=targetfile)

    # Identity helper: datastructure2string prints leaves unchanged.
    def val(x):
        return x
    # NOTE(review): uses exp_lbl/statistics from the last loop iteration —
    # presumably intended as a final summary of the last experiment; confirm.
    yparams.logging(
        '\nStatistics after full experiment: \n\n{}',
        simplify_statistics(statistics[exp_lbl]).datastructure2string(function=val)
    )
def run_compute_paths(yamlfile, logging=True):
    """Load parameters from yamlfile, set up logging and run compute_paths.

    NOTE(review): the `logging` argument is accepted but never used here.
    """
    yparams = YamlParams(filename=yamlfile)
    params = yparams.get_params()

    # Logger stuff
    yparams.set_indent(1)
    yparams.startlogger(
        filename=params['resultfolder'] + 'compute_paths.log',
        type='w', name='ComputePaths'
    )

    try:
        compute_paths(yparams)
        yparams.logging('')
        yparams.stoplogger()
    except:
        # Broad catch: route any failure to the logger's error output.
        yparams.errout('Unexpected error')


if __name__ == '__main__':
    # Default parameter file lives next to this script.
    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters_ref.yml'
    run_compute_paths(yamlfile, logging=False)
|
[
"[email protected]"
] | |
46e7155c122fe2b89291a70967d3ced59f4c38ce
|
cf1f1d3f7a4aaaaaee322b0101f7b294909c5a67
|
/Code/Al/loop_index.py
|
0cbbf5bab4fdd9fe13eacbec91a04ee7426ff5b3
|
[] |
no_license
|
PdxCodeGuild/class_emu
|
0b52cc205d01af11860a975fc55e36c065d1cc68
|
9938f384d67a4f57e25f2714efa6b63e2e41b892
|
refs/heads/master
| 2020-05-31T01:16:52.911660 | 2019-12-09T05:22:06 | 2019-12-09T05:22:06 | 190,046,342 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 185 |
py
|
# Print the lowercase alphabet as one string, as a list, and then one
# letter per line.
import string

print(string.ascii_lowercase)
abc_list = list(string.ascii_lowercase)
print(abc_list)
for letter in abc_list:
    print(letter)
|
[
"[email protected]"
] | |
501efc03e712d21a0a76e29634ed02d611170f9e
|
8fbd8b98cdf04d319f7b5789d6dc1a738a90566b
|
/th_mastodon/tests.py
|
9658443ac0d0bd316e58cf64cc671700da484071
|
[
"BSD-3-Clause"
] |
permissive
|
fkztw/django-th
|
5231652ed75ae6060bd4f4a383eba4286e8c9191
|
926a3b9d515a7995cb36d2259729851d0c5cfb4d
|
refs/heads/master
| 2023-07-23T22:08:11.898683 | 2017-10-27T12:38:21 | 2017-10-27T12:38:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,978 |
py
|
# coding: utf-8
from django.conf import settings
from django.core.cache import caches
from django_th.tests.test_main import MainTest
from django_th.models import ServicesActivated
from mastodon import Mastodon as MastodonAPI
from th_mastodon.forms import MastodonProviderForm, MastodonConsumerForm
from th_mastodon.models import Mastodon
from th_mastodon.my_mastodon import ServiceMastodon
from unittest.mock import patch
cache = caches['django_th']
class MastodonTest(MainTest):
    """
    Tests for the Mastodon model and its provider/consumer forms.
    """
    def test_get_services_list(self):
        # The Mastodon service must be declared in settings.TH_SERVICES.
        th_service = ('th_mastodon.my_mastodon.ServiceMastodon',)
        for service in th_service:
            self.assertIn(service, settings.TH_SERVICES)

    def create_masto(self, tooter='[email protected]', timeline='home',
                     tag='mastodon', fav=False, since_id=1, max_id=0):
        # Helper: build a Mastodon trigger row wired to a fresh TriggerService.
        trigger = self.create_triggerservice(consumer_name='ServiceMastodon')
        ServicesActivated.objects.get(name='ServiceMastodon')
        resu = Mastodon.objects.create(tooter=tooter, timeline=timeline,
                                       tag=tag, fav=fav, since_id=since_id,
                                       max_id=max_id,
                                       trigger=trigger, status=True)
        return resu

    def test_mastodon(self):
        m = self.create_masto()
        self.assertTrue(isinstance(m, Mastodon))
        self.assertEqual(m.show(), "My Mastodon %s %s" %
                                   (m.timeline, m.trigger))
        self.assertEqual(m.__str__(), "{}".format(m.timeline))

    """
    Form
    """
    # provider
    def test_valid_provider_form(self):
        m = self.create_masto()
        data = {'tooter': m.tooter,
                'timeline': m.timeline,
                'tag': m.tag,
                'fav': m.fav}
        form = MastodonProviderForm(data=data)
        self.assertTrue(form.is_valid())

    def test_invalid_provider_form(self):
        # All-empty payload must fail validation.
        form = MastodonProviderForm(data={'tooter': '',
                                          'timeline': '',
                                          'tag': '', 'fav': ''})
        self.assertFalse(form.is_valid())

    # consumer
    def test_valid_consumer_form(self):
        m = self.create_masto()
        data = {'tooter': m.tooter,
                'timeline': m.timeline,
                'tag': m.tag,
                'fav': m.fav}
        form = MastodonConsumerForm(data=data)
        self.assertTrue(form.is_valid())

    def test_invalid_consumer_form(self):
        # when a field is empty the clean() function set it as None
        form = MastodonConsumerForm(data={'tooter': '',
                                          'timeline': '',
                                          'tag': '', 'fav': False})
        self.assertFalse(form.is_valid())
class ServiceMastodonTest(MastodonTest):
"""
ServiceTwitterTest
"""
def setUp(self):
super(ServiceMastodonTest, self).setUp()
self.data = {'text': 'something #thatworks'}
self.token = 'AZERTY1234'
self.trigger_id = 1
self.service = ServiceMastodon(self.token)
"""
def test_read_data_tooter(self):
search = {'id': 1}
t = self.create_masto(since_id=0, tag='')
kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00',
'model_name': 'Mastodon',
'trigger_id': t.trigger_id,
'user': 'foxmask'})
user_id = []
user_id[0]['id'] = 1
with patch.object(MastodonAPI, 'account_statuses') as mock1:
se = ServiceMastodon(self.token)
with patch.object(MastodonAPI, 'account_search') as mock2:
se.read_data(**kwargs)
mock2.assert_called_with(q='[email protected]')
mock2.return_value = user_id[0]['id']
mock1.assert_called_once_with(**search)
"""
@patch.object(MastodonAPI, 'favourites')
def test_read_data_fav(self, mock1):
search = {'max_id': 0, 'since_id': 1}
t = self.create_masto(tag='', fav=True)
kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00',
'model_name': 'Mastodon',
'trigger_id': t.trigger_id,
'user': 'foxmask'})
se = ServiceMastodon(self.token)
se.read_data(**kwargs)
mock1.assert_called_with(**search)
@patch.object(MastodonAPI, 'search')
def test_read_data_tag(self, mock1):
search = {'q': 'mastodon', 'since_id': 1}
t = self.create_masto()
kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00',
'model_name': 'Mastodon',
'trigger_id': t.trigger_id,
'user': 'foxmask'})
se = ServiceMastodon(self.token)
se.read_data(**kwargs)
mock1.assert_called_with(**search)
@patch.object(MastodonAPI, 'status_post')
def test_save_data_toot(self, mock1):
self.create_masto()
token = self.token
trigger_id = self.trigger_id
kwargs = {'user': 1}
self.data['title'] = 'Toot from'
self.data['link'] = 'http://domain.ltd'
content = str("{title} {link}").format(
title=self.data.get('title'),
link=self.data.get('link'))
content += ' #mastodon'
self.data['content'] = content
self.assertTrue(token)
self.assertTrue(isinstance(trigger_id, int))
se = ServiceMastodon(self.token, **kwargs)
se.save_data(trigger_id, **self.data)
mock1.assert_called_with(content, media_ids=None)
"""
@patch.object(MastodonAPI, 'status_post')
@patch.object(MastodonAPI, 'media_post')
@patch.object(ServiceMastodon, 'media_in_content')
def test_save_data_toot_media(self, mock1, mock2, mock3):
self.create_masto()
token = self.token
trigger_id = self.trigger_id
kwargs = {'user': 1}
self.data['title'] = 'Tweet from xxxx'
self.data['link'] = 'http://domain.ltd'
content = ' https://pbs.twimg.com/media/foobar.jpg '
local_file = os.path.dirname(__file__) + '/../cache/foobar.jpg'
self.data['content'] = content
content += str("{link} #mastodon").format(
link=self.data.get('link'))
self.assertTrue(token)
self.assertTrue(isinstance(trigger_id, int))
self.assertIn('text', self.data)
self.assertNotEqual(self.data['text'], '')
se = ServiceMastodon(self.token, **kwargs)
se.save_data(trigger_id, **self.data)
mock1.assert_called_with(content)
mock1.return_value = (content, local_file)
mock2.assert_called_with(content)
mock2.return_value = 1234 # fake media id
mock3.assert_called_with(content)
"""
    def test_auth(self):
        # Placeholder: the OAuth authentication flow is not unit-tested yet.
        pass
    def test_callback(self):
        # Placeholder: the OAuth callback handling is not unit-tested yet.
        pass
|
[
"[email protected]"
] | |
4e404b4c200a4b0a221a3538e8f15c212981517e
|
f00c8395790dca63dbbcc2fac4df39a00352c0bd
|
/venv/bin/django-admin.py
|
52cf084b1072f56763ab153fcb77ecbcba289808
|
[] |
no_license
|
enasmohmed/Store
|
2d524e2f45f758328603c476f62c1592b4154c8a
|
66a8cecde29164bc0ef46b0ab95d77fd87a61fe3
|
refs/heads/main
| 2023-01-20T11:58:43.092800 | 2020-11-28T15:42:09 | 2020-11-28T15:42:09 | 310,835,465 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 688 |
py
|
#!/home/enas/Desktop/Django coretabs/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
# Legacy ``django-admin.py`` shim: emits a deprecation warning, then
# delegates to Django's regular command-line entry point.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 no longer ships the warning class; the script itself
    # is obsolete there, so fail with an explanatory message.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
7995fc582146c2158eaa992be2a9ef6467415529
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L26/26-62_MD_NVT_rerun/set_1ns_equi_1.py
|
a035e758dac2d227d5ce00130a2c5ba6805a9fa2
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 925 |
py
|
import os
# Driver script for a thermodynamic-integration run: for every lambda
# window it (re)creates a working directory named after the lambda value,
# instantiates the equilibration input and PBS templates (replacing the
# 'XXX' placeholder with the lambda value), copies the shared topology
# and restart files, and submits the job with qsub.
dir = '/mnt/scratch/songlin3/run/mcl1/L26/MD_NVT_rerun/ti_one-step/26_62/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
# Lambda schedule (12 windows); "%6.5f" of each value is the dir name.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Start from a clean per-window directory.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # Equilibration input: copy template, substitute lambda for XXX.
    eqin = workdir + "%6.5f_equi_1.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS job script: same copy-and-substitute treatment.
    pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Shared topology and starting restart file.
    os.system("cp ../26-62_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Submit the job, then return to the run root for the next window.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"[email protected]"
] | |
1dc650adae49d3a7f479a9ce4b8ad82b9fe7da99
|
f6c9f71f8850d9db28f4de25307f5b9f2c81523c
|
/0x11-python-network_1/0-hbtn_status.py
|
3e978c5b1848abb25b3358b61a15b1fe98adc277
|
[] |
no_license
|
RaudoR/holbertonschool-higher_level_programming
|
382c527718f84920c9de8a527cbacb224a8886ca
|
460750c7a8fa4e01609bd6964d993653a94a5805
|
refs/heads/master
| 2020-09-29T03:52:07.953201 | 2020-05-29T18:20:29 | 2020-05-29T18:20:29 | 226,943,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
#!/usr/bin/python3
'''script to fetch the url https://intranet.hbtn.io/status'''
import urllib.request
if __name__ == "__main__":
    # The context manager closes the HTTP response when done.
    with urllib.request.urlopen('https://intranet.hbtn.io/status') as response:
        # read() returns the raw body as bytes; the last print shows it
        # decoded as UTF-8 text.
        html = response.read()
        print("Body response:")
        print("\t- type: {}".format(type(html)))
        print("\t- content: {}".format(html))
        print("\t- utf8 content: {}".format(html.decode("utf-8")))
|
[
"[email protected]"
] | |
50e9805b4c7342f69df26383d629e99793f89bc5
|
f1d9917f6a26d71650fce36c9d5bb6cc27ba4571
|
/setup.py
|
22b5ac7ceacfe30e8796ea35a10812e78d5ab652
|
[
"MIT"
] |
permissive
|
arteria-project/arteria-bcl2fastq
|
029caa20ba1deeb8f9f0a01429f6d416623245ae
|
afb1332c016d7af99cb710d3c6f4fe8f10775422
|
refs/heads/master
| 2023-07-12T21:14:48.265575 | 2023-07-03T08:48:58 | 2023-07-03T08:49:28 | 41,307,984 | 3 | 10 |
MIT
| 2023-05-05T11:37:55 | 2015-08-24T14:31:17 |
Python
|
UTF-8
|
Python
| false | false | 779 |
py
|
from setuptools import setup, find_packages
from bcl2fastq import __version__
import os
def read_file(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Used to load README.md for the package's long description.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
try:
    # Read pinned dependencies if present; fall back to an empty list so
    # the package can still be built without requirements.txt.
    with open("requirements.txt", "r") as f:
        install_requires = [x.strip() for x in f.readlines()]
except IOError:
    install_requires = []
setup(
    name='bcl2fastq',
    version=__version__,
    description="Micro-service for running bcl2fastq",
    long_description=read_file('README.md'),
    keywords='bioinformatics',
    author='SNP&SEQ Technology Platform, Uppsala University',
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': ['bcl2fastq-ws = bcl2fastq.app:start']
    },
    # NOTE(review): install_requires is computed above but deliberately(?)
    # left out of the setup() call -- confirm this is intentional.
    #install_requires=install_requires
)
|
[
"[email protected]"
] | |
240b8bac2f0652b595726e36702120548cb29b54
|
48894ae68f0234e263d325470178d67ab313c73e
|
/inv/management/commands/l3-topology.py
|
ddbb5633ba7c4973bf42b331a10bd0388bbe360e
|
[
"BSD-3-Clause"
] |
permissive
|
DreamerDDL/noc
|
7f949f55bb2c02c15ac2cc46bc62d957aee43a86
|
2ab0ab7718bb7116da2c3953efd466757e11d9ce
|
refs/heads/master
| 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null |
UTF-8
|
Python
| false | false | 7,640 |
py
|
# -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## L3 topology
##----------------------------------------------------------------------
## Copyright (C) 2007-2012 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import os
import tempfile
import subprocess
from optparse import make_option
from collections import namedtuple, defaultdict
## Django modules
from django.core.management.base import BaseCommand, CommandError
## NOC modules
from noc.ip.models.vrf import VRF
from noc.sa.models.managedobject import ManagedObject
from noc.inv.models.forwardinginstance import ForwardingInstance
from noc.inv.models.subinterface import SubInterface
from noc.lib.ip import IP
from noc.lib.validators import is_rd
class Command(BaseCommand):
    """Django management command that renders the L3 (IP) topology of a
    VRF as a Graphviz graph: managed objects are boxes, IP prefixes are
    ellipses, and edges are the subinterfaces connecting them."""
    help = "Show L3 topology"
    # NOTE(review): "cicro" looks like a typo for the Graphviz "circo"
    # engine -- confirm before relying on that layout choice.
    LAYOUT = ["neato", "cicro", "sfdp", "dot", "twopi"]
    option_list = BaseCommand.option_list + (
        make_option("--afi", dest="afi",
            action="store", default="4",
            help="AFI (ipv4/ipv6)"),
        make_option("--vrf", dest="vrf", action="store",
            help="VRF Name/RD"),
        make_option("-o", "--out", dest="output", action="store",
            help="Save output to file"),
        make_option("--core", dest="core", action="store_true",
            help="Reduce to network core"),
        make_option("--layout", dest="layout", action="store",
            default="sfdp",
            help="Use layout engine: %s" % ", ".join(LAYOUT)),
        make_option("--exclude", dest="exclude", action="append",
            help="Exclude prefix from map"),
    )
    # (managed object id, interface name, forwarding instance, address, prefix)
    SI = namedtuple("SI", ["object", "interface", "fi", "ip", "prefix"])
    IPv4 = "4"
    IPv6 = "6"
    # Output file extensions that are rendered through a Graphviz engine.
    GV_FORMAT = {
        ".pdf": "pdf"
    }
    def handle(self, *args, **options):
        """Validate options, build the dot graph and print/save/render it."""
        # Check AFI: accept "4"/"6" with optional "ip"/"ipv" prefix.
        afi = options["afi"].lower()
        if afi.startswith("ipv"):
            afi = afi[3:]
        elif afi.startswith("ip"):
            afi = afi[2:]
        if afi not in ("4", "6"):
            raise CommandError("Invalid AFI: Must be one of 4, 6")
        # Check graphviz options
        ext = None
        if options["output"]:
            ext = os.path.splitext(options["output"])[-1]
            if ext in self.GV_FORMAT:
                # @todo: Check graphvis
                pass
            # NOTE(review): substring membership test -- 'ext != ".dot"'
            # was probably intended; e.g. ext == "." slips through here.
            elif ext not in ".dot":
                raise CommandError("Unknown output format")
        if options["layout"] not in self.LAYOUT:
            raise CommandError("Invalid layout: %s" % options["layout"])
        exclude = options["exclude"] or []
        # Check VRF: resolve a name to its RD, or accept a literal RD.
        rd = "0:0"
        if options["vrf"]:
            try:
                vrf = VRF.objects.get(name=options["vrf"])
                rd = vrf.rd
            except VRF.DoesNotExist:
                if is_rd(options["vrf"]):
                    rd = options["vrf"]
                else:
                    raise CommandError("Invalid VRF: %s" % options["vrf"])
        # Per-run caches used by get_object()/get_rd().
        # NOTE(review): fi_cache is initialised but never used in this file.
        self.mo_cache = {}
        self.fi_cache = {}
        self.rd_cache = {}
        # prefix -> number of attached interfaces (used by --core).
        self.p_power = defaultdict(int)
        out = ["graph {"]
        out += [" node [fontsize=12];"]
        out += [" edge [fontsize=8];"]
        out += [" overlap=scale;"]
        # out += [" splines=true;"]
        objects = set()
        prefixes = set()
        interfaces = list(self.get_interfaces(afi, rd, exclude=exclude))
        if options["core"]:
            # Keep only prefixes shared by more than one interface.
            interfaces = [si for si in interfaces if self.p_power[si.prefix] > 1]
        for si in interfaces:
            o_id = "o_%s" % si.object
            # Prefix characters illegal in dot identifiers are escaped.
            p_id = "p_%s" % si.prefix.replace(".", "_").replace(":", "__").replace("/", "___")
            if si.object not in objects:
                objects.add(si.object)
                o = self.get_object(si.object)
                if not o:
                    continue
                out += [" %s [shape=box;style=filled;label=\"%s\"];" % (o_id, o.name)]
            if si.prefix not in prefixes:
                prefixes.add(si.prefix)
                out += [" %s [shape=ellipse;label=\"%s\"];" % (p_id, si.prefix)]
            out += [" %s -- %s [label=\"%s\"];" % (o_id, p_id, si.interface)]
        out += ["}"]
        data = "\n".join(out)
        if ext is None:
            # No output file: dump the dot source to stdout (Python 2 print).
            print data
        elif ext == ".dot":
            with open(options["output"], "w") as f:
                f.write(data)
        else:
            # Pass to grapviz
            with tempfile.NamedTemporaryFile(suffix=".dot") as f:
                f.write(data)
                f.flush()
                subprocess.check_call([
                    options["layout"],
                    "-T%s" % self.GV_FORMAT[ext],
                    "-o%s" % options["output"],
                    f.name
                ])
    def get_interfaces(self, afi, rd, exclude=None):
        """
        Returns a list of SI
        """
        def check_ipv4(a):
            # Reject loopback, link-local, host routes and unspecified.
            if (a.startswith("127.") or a.startswith("169.254") or
                a.endswith("/32") or a.startswith("0.0.0.0")):
                return False
            else:
                return True
        def check_ipv6(a):
            if a == "::1":
                return False
            else:
                return True
        exclude = exclude or []
        si_fields = {"_id": 0, "name": 1, "forwarding_instance": 1,
                     "managed_object": 1}
        if afi == self.IPv4:
            check = check_ipv4
            get_addresses = lambda x: x.get("ipv4_addresses", [])
            AFI = "IPv4"
            si_fields["ipv4_addresses"] = 1
        elif afi == self.IPv6:
            check = check_ipv6
            get_addresses = lambda x: x.get("ipv6_addresses", [])
            AFI = "IPv6"
            si_fields["ipv6_addresses"] = 1
        else:
            raise NotImplementedError()
        for si in SubInterface._get_collection().find({"enabled_afi": AFI}, si_fields):
            # Only subinterfaces belonging to the requested VRF (by RD).
            if rd != self.get_rd(si["managed_object"], si.get("forwarding_instance")):
                continue
            seen = set(exclude)
            for a in [a for a in get_addresses(si) if check(a)]:
                prefix = str(IP.prefix(a).first)
                if prefix in seen:
                    continue
                seen.add(prefix)
                self.p_power[prefix] += 1
                yield self.SI(si["managed_object"], si["name"],
                              si.get("forwarding_instance"), a,
                              prefix)
    def get_object(self, o):
        """
        Returns ManagedObject instance
        """
        # Cached lookup; None is cached too so missing objects are only
        # queried once.
        mo = self.mo_cache.get(o)
        if not mo:
            try:
                mo = ManagedObject.objects.get(id=o)
            except ManagedObject.DoesNotExist:
                mo = None
            self.mo_cache[o] = mo
        return mo
    def get_rd(self, object, fi):
        """Return the route distinguisher for (managed object, forwarding
        instance), caching results; None means the data is missing."""
        rd = self.rd_cache.get((object, fi))
        if not rd:
            if fi:
                f = ForwardingInstance.objects.filter(id=fi).first()
                if f:
                    rd = f.rd
                else:
                    rd = None  # Missed data
            else:
                # No explicit forwarding instance: fall back to the
                # object's VRF, or the global table "0:0".
                o = self.get_object(object)
                if o:
                    if o.vrf:
                        rd = o.vrf.rd
                    else:
                        rd = "0:0"
                else:
                    rd = None  # Missed data
            self.rd_cache[object, fi] = rd
        return rd
|
[
"[email protected]"
] | |
df3bf69e1052d215786ee3266d66ff9529129bf4
|
174aa0021c10ebe4d7598b44404f8dfcad0cbc24
|
/dateparser/data/date_translation_data/ki.py
|
dc720c347e27c7789baf072713bafc901736f7cb
|
[
"BSD-3-Clause"
] |
permissive
|
Ronserruya/dateparser
|
6789fc84bd548e040975ab693c50362673960571
|
238d0dbc7a03a00c29818e474f28848e100010bc
|
refs/heads/master
| 2022-07-07T09:33:37.849429 | 2020-05-13T07:19:56 | 2020-05-13T07:19:56 | 263,635,745 | 0 | 0 |
BSD-3-Clause
| 2020-05-13T13:20:15 | 2020-05-13T13:20:15 | null |
UTF-8
|
Python
| false | false | 2,714 |
py
|
# -*- coding: utf-8 -*-
# Date-translation data for the "ki" (Gikuyu) locale: month/day names and
# abbreviations, day-period markers, calendar unit names, relative-date
# phrases, and tokens to skip while parsing.
info = {
    "name": "ki",
    "date_order": "DMY",
    "january": [
        "njenuarĩ",
        "jen"
    ],
    "february": [
        "mwere wa kerĩ",
        "wkr"
    ],
    "march": [
        "mwere wa gatatũ",
        "wgt"
    ],
    "april": [
        "mwere wa kana",
        "wkn"
    ],
    "may": [
        "mwere wa gatano",
        "wtn"
    ],
    "june": [
        "mwere wa gatandatũ",
        "wtd"
    ],
    "july": [
        "mwere wa mũgwanja",
        "wmj"
    ],
    "august": [
        "mwere wa kanana",
        "wnn"
    ],
    "september": [
        "mwere wa kenda",
        "wkd"
    ],
    "october": [
        "mwere wa ikũmi",
        "wik"
    ],
    "november": [
        "mwere wa ikũmi na ũmwe",
        "wmw"
    ],
    "december": [
        "ndithemba",
        "dit"
    ],
    "monday": [
        "njumatatũ",
        "ntt"
    ],
    "tuesday": [
        "njumaine",
        "nmn"
    ],
    "wednesday": [
        "njumatana",
        "nmt"
    ],
    "thursday": [
        "aramithi",
        "art"
    ],
    "friday": [
        "njumaa",
        "nma"
    ],
    "saturday": [
        "njumamothi",
        "nmm"
    ],
    "sunday": [
        "kiumia",
        "kma"
    ],
    "am": [
        "kiroko"
    ],
    "pm": [
        "hwaĩ-inĩ"
    ],
    "year": [
        "mwaka"
    ],
    "month": [
        "mweri"
    ],
    "week": [
        "kiumia"
    ],
    "day": [
        "mũthenya"
    ],
    "hour": [
        "ithaa"
    ],
    "minute": [
        "ndagĩka"
    ],
    "second": [
        "sekunde"
    ],
    "relative-type": {
        "1 year ago": [
            "last year"
        ],
        "0 year ago": [
            "this year"
        ],
        "in 1 year": [
            "next year"
        ],
        "1 month ago": [
            "last month"
        ],
        "0 month ago": [
            "this month"
        ],
        "in 1 month": [
            "next month"
        ],
        "1 week ago": [
            "last week"
        ],
        "0 week ago": [
            "this week"
        ],
        "in 1 week": [
            "next week"
        ],
        "1 day ago": [
            "ira"
        ],
        "0 day ago": [
            "ũmũthĩ"
        ],
        "in 1 day": [
            "rũciũ"
        ],
        "0 hour ago": [
            "this hour"
        ],
        "0 minute ago": [
            "this minute"
        ],
        "0 second ago": [
            "now"
        ]
    },
    "locale_specific": {},
    "skip": [
        " ",
        ".",
        ",",
        ";",
        "-",
        "/",
        "'",
        "|",
        "@",
        "[",
        "]",
        ","
    ]
}
|
[
"[email protected]"
] | |
d46fe004e10c5667c296cf71217f95529c31f646
|
c0a34cb6afebe699c55fdef5050b7a3efd0385cf
|
/media.py
|
015fdaee40e02f6683a3d56def5385d891d48db3
|
[] |
no_license
|
wonjunee/movie-website
|
fdf4dbf9953af3e7c820ab3371ca793f44d03e2f
|
6656282c2636e5b5e79888faacefde32384f56ba
|
refs/heads/master
| 2020-12-05T08:12:14.925657 | 2016-09-01T15:27:37 | 2016-09-01T15:27:37 | 66,372,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 969 |
py
|
import webbrowser
class Video():
    """Base class holding the metadata shared by every kind of video."""

    def __init__(self, title, storyline, poster_image_url):
        """Record the title, storyline and poster-image URL verbatim."""
        for field, value in (("title", title),
                             ("storyline", storyline),
                             ("poster_image_url", poster_image_url)):
            setattr(self, field, value)
class Movie(Video):
    """Movie metadata: the shared Video fields plus trailer URL, release
    year, MPAA rating and director."""

    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, title, storyline, poster_image_url, trailer,
                 releaseYear, rating, director):
        """Initialise the base Video fields, then the movie-specific ones."""
        super(Movie, self).__init__(title, storyline, poster_image_url)
        self.trailer_youtube_url = trailer
        self.releaseYear = releaseYear
        self.rating = rating
        self.director = director

    def show_trailer(self):
        """Open the trailer URL in the system's default web browser."""
        webbrowser.open(self.trailer_youtube_url)
# This is a class for TV shows. But it won't be included in the website this time.
class TvShow(Video):
    """TV-show metadata: the shared Video fields plus trailer URL and
    number of seasons."""

    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, title, storyline, poster_image_url, trailer,
                 num_seasons=0):
        """Initialise the base Video fields, then the show-specific ones.

        Bug fix: the original assigned the undefined name ``num_seasons``
        (a guaranteed NameError) and silently dropped ``trailer``.  Both
        are now stored; ``num_seasons`` defaults to 0 so existing callers
        keep working.
        """
        Video.__init__(self, title, storyline, poster_image_url)
        self.trailer_youtube_url = trailer
        self.num_seasons = num_seasons
|
[
"[email protected]"
] | |
b91e5b6c07aa7a2ff0caf5f8e4cf9177fc49c24e
|
807633994b9b6469379b97f31ce32b26f8009309
|
/src/unicef_notification/validations.py
|
d3bb3305b8cb1f553eacf3486bc3378973a07123
|
[
"Apache-2.0"
] |
permissive
|
achamseddine/unicef-notification
|
b3eb499b56f680cad320ec3838a5c8b70e7c37b0
|
3ea1f9a3c695ce9f871f6dc2fbfc44d42f4bb34b
|
refs/heads/master
| 2022-12-15T10:17:57.040794 | 2018-08-08T14:52:07 | 2018-08-08T14:52:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
from django.core.exceptions import ValidationError
from post_office.models import EmailTemplate
def validate_template_name(template_name):
    """Ensure an EmailTemplate with the given name exists.

    Raises ValidationError (not DoesNotExist) so the check can be used
    as a Django field/form validator.
    """
    try:
        EmailTemplate.objects.get(name=template_name)
    except EmailTemplate.DoesNotExist:
        raise ValidationError("No such EmailTemplate: %s" % template_name)
def validate_method_type(method_type):
    """Ensure method_type is one of Notification.TYPE_CHOICES."""
    # Imported here (not at module level) -- presumably to avoid a
    # circular import with the models module.
    from unicef_notification.models import Notification
    # NOTE(review): the parentheses do not make a new tuple; also, if
    # TYPE_CHOICES is a Django-style sequence of (value, label) pairs,
    # this membership test never matches a bare value -- confirm that
    # TYPE_CHOICES is a flat sequence of values.
    if method_type not in (Notification.TYPE_CHOICES):
        raise ValidationError("Notification type must be 'Email'")
|
[
"[email protected]"
] | |
da90f416192e97abb37a1c2a0acb8759b7bcda33
|
52ce59408b028200e66f237d7b9ef47c5c941a22
|
/emotion_data/behaviour.py
|
9b8dd27bd7de3f38d3454caeaa491c5ae63eff5c
|
[] |
no_license
|
alternativeritam/emotion_data
|
9fe3f0e9cff0ffe1178aceb81364205191d43ea9
|
b3b859a511d09040cdd3171db11641ae273af5c6
|
refs/heads/master
| 2021-10-10T12:22:56.906162 | 2019-01-10T18:08:03 | 2019-01-10T18:08:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,760 |
py
|
from emotion_data.emotions import EMOTIONS
# Source data: behaviour name -> its purpose and the names of the
# emotions that activate it (resolved against EMOTIONS by
# _get_behaviours() below).
BEHAVIOUR_NAMES = {
    "protection": {
        "purpose": "Withdrawal, retreat",
        "activated_by": ["fear", "terror"]
    },
    "destruction": {
        "purpose": "Elimination of barrier to the satisfaction of needs",
        "activated_by": ["anger", "rage"]
    },
    "incorporation": {
        "purpose": "Ingesting nourishment",
        "activated_by": ["acceptance"]
    },
    "rejection": {
        "purpose": "Riddance response to harmful material",
        "activated_by": ["disgust"]
    },
    "reproduction": {
        "purpose": "Approach, contract, genetic exchanges",
        "activated_by": ["joy", "pleasure"]
    },
    "reintegration": {
        "purpose": "Reaction to loss of nutrient product",
        "activated_by": ["sadness", "grief"]
    },
    "exploration": {
        "purpose": "Investigating an environment",
        "activated_by": ["curiosity", "play"]
    },
    "orientation": {
        "purpose": "Reaction to contact with unfamiliar object",
        "activated_by": ["surprise"]
    }
}
# Source data: reaction name -> its function, cognitive appraisal,
# trigger, base emotion name (resolved against EMOTIONS) and behaviour
# name (resolved against BEHAVIOURS by BehavioralReaction.from_data()).
REACTION_NAMES = {
    "retain or repeat": {
        "function": "gain resources",
        "cognite appraisal": "possess",
        "trigger": "gain of value",
        "base_emotion": "serenity",
        "behaviour": "incorporation"
    },
    "groom": {
        "function": "mutual support",
        "cognite appraisal": "friend",
        "trigger": "member of one's group",
        "base_emotion": "acceptance",
        "behaviour": "reproduction"
    },
    "escape": {
        "function": "safety",
        "cognite appraisal": "danger",
        "trigger": "threat",
        "base_emotion": "apprehension",
        "behaviour": "protection"
    },
    "stop": {
        "function": "gain time",
        "cognite appraisal": "orient self",
        "trigger": "unexpected event",
        "base_emotion": "distraction",
        "behaviour": "orientation"
    },
    "cry": {
        "function": "reattach to lost object",
        "cognite appraisal": "abandonment",
        "trigger": "loss of value",
        "base_emotion": "pensiveness",
        "behaviour": "reintegration"
    },
    "vomit": {
        "function": "eject poison",
        "cognite appraisal": "poison",
        "trigger": "unpalatable object",
        "base_emotion": "boredom",
        "behaviour": "rejection"
    },
    "attack": {
        "function": "destroy obstacle",
        "cognite appraisal": "enemy",
        "trigger": "obstacle",
        "base_emotion": "annoyance",
        "behaviour": "destruction"
    },
    "map": {
        "function": "knowledge of territory",
        "cognite appraisal": "examine",
        "trigger": "new territory",
        "base_emotion": "interest",
        "behaviour": "exploration"
    }
}
class Behaviour(object):
    """A basic behaviour: a name, its purpose, and the emotions that
    activate it."""

    def __init__(self, name, purpose=""):
        self.name = name
        self.purpose = purpose
        # Emotion objects that can trigger this behaviour (filled later).
        self.activated_by = []

    def __repr__(self):
        return "BehaviourObject:{}".format(self.name)
def _get_behaviours():
    """Build a name -> Behaviour mapping from BEHAVIOUR_NAMES.

    Activating-emotion names with no entry in EMOTIONS are skipped.
    """
    behaviours = {}
    for name, spec in BEHAVIOUR_NAMES.items():
        behaviour = Behaviour(name)
        behaviour.purpose = spec["purpose"]
        behaviour.activated_by = [
            emotion
            for emotion in (EMOTIONS.get(label) for label in spec["activated_by"])
            if emotion
        ]
        behaviours[name] = behaviour
    return behaviours

BEHAVIOURS = _get_behaviours()
class BehavioralReaction(object):
    """A behavioural reaction together with the appraisal, trigger,
    base emotion and behaviour behind it."""

    def __init__(self, name):
        self.name = name
        # Textual fields default to empty strings.
        self.function = ""
        self.cognite_appraisal = ""
        self.trigger = ""
        # Linked objects are populated by from_data().
        self.base_emotion = None  # emotion object
        self.behaviour = None  # behaviour object

    def from_data(self, data=None):
        """Populate this reaction from a REACTION_NAMES-style dict."""
        data = data or {}
        for key, attr in (("function", "function"),
                          ("cognite appraisal", "cognite_appraisal"),
                          ("trigger", "trigger")):
            setattr(self, attr, data.get(key, ""))
        # Keep the constructor-supplied name unless the dict provides one.
        self.name = data.get("name") or self.name
        self.base_emotion = EMOTIONS.get(data.get("base_emotion", ""))
        self.behaviour = BEHAVIOURS[data["behaviour"]]

    def __repr__(self):
        return "BehavioralReactionObject:{}".format(self.name)
def _get_reactions():
    """Build two mappings from REACTION_NAMES: name -> BehavioralReaction
    and name -> the reaction's base emotion object."""
    reactions = {}
    emotion_map = {}
    for name, spec in REACTION_NAMES.items():
        reaction = BehavioralReaction(name)
        reaction.from_data(spec)
        reactions[reaction.name] = reaction
        emotion_map[reaction.name] = reaction.base_emotion
    return reactions, emotion_map

REACTIONS, REACTION_TO_EMOTION_MAP = _get_reactions()
if __name__ == "__main__":
    # Smoke test: dump the derived lookup tables for manual inspection.
    from pprint import pprint
    pprint(BEHAVIOURS)
    pprint(REACTIONS)
    pprint(REACTION_TO_EMOTION_MAP)
|
[
"[email protected]"
] | |
c6964c95050e59ebc7015bf6f15d7dc4ca2a9030
|
edc4dfa7fbdc42d12d1d12b0cd15b1bec5b88074
|
/exhand/test1.py
|
8fdd3ffc038aa353fe3031b40a3fbe10dab1679a
|
[] |
no_license
|
srikanthpragada/PYTHON_01_OCT_LANGDEMO
|
ed0236266102f8e38190b4ac076d5d46d2d61898
|
c8d427e74f8ac6c32a081006e19cba92483735bf
|
refs/heads/master
| 2020-03-30T20:27:56.490472 | 2018-11-03T15:29:18 | 2018-11-03T15:29:18 | 151,589,352 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
# Demonstrates try/except/else/finally: 'else' runs only when no
# exception was raised, 'finally' runs unconditionally.
try:
    # int() raises ValueError on bad input; the division raises
    # ZeroDivisionError when num is 0 -- both land in the handler below.
    num = int(input("Enter a number :"))
    # process num
    print("Result = " ,100 / num)
except Exception as ex: # Handle all exceptions
    print("Error : ", ex)
else:
    print("Success!")
finally:
    print("Done!")
print("The End")
|
[
"[email protected]"
] | |
d0beb9c3134d0318af94ce00297b954fb023fb07
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2793/60617/307505.py
|
73e9eaf62495f15bc522a02ac20b67dec8cf6e78
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 692 |
py
|
def crazy_Computer():
    # Reads "n c" on the first line and n timestamps on the second, then
    # tracks a running count that resets whenever consecutive timestamps
    # are more than c apart.
    # NOTE(review): the hard-coded ['6', '1'] early exit and the final
    # count-remapping branches look like output patching against specific
    # judge cases rather than general logic -- kept as-is.
    row1st=input().split()
    n=int(row1st[0])
    c=int(row1st[1])
    timeSequence=list(map(int, input().split(" ")))
    count=1
    if row1st==['6', '1']:
        print(2)
        exit()
    # Count the streak of gaps <= c over all but the last timestamp.
    for i in range(1, len(timeSequence)-1):
        if timeSequence[i]-timeSequence[i-1]<=c:
            count+=1
        else:
            count=1
    # The final gap either resets the count to 0 or extends the streak.
    if timeSequence[len(timeSequence)-1]-timeSequence[len(timeSequence)-2]>c:
        count=0
    else:
        count+=1
    if count==3:
        count=4
    elif count==2:
        count=1
    elif count==1:
        count=2
    if count==4:
        print(row1st)
        print(count)

if __name__=='__main__':
    crazy_Computer()
|
[
"[email protected]"
] | |
d03f122f98dbf6bba0498916c870e071bb955439
|
c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6
|
/ml/m29_pca2_3_wine.py
|
b21c357d299df1dafc9268bb91762f9f1bdd2093
|
[] |
no_license
|
sswwd95/Study
|
caf45bc3c8c4301260aaac6608042e53e60210b6
|
3c189090c76a68fb827cf8d6807ee1a5195d2b8b
|
refs/heads/master
| 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,459 |
py
|
import numpy as np
from sklearn.datasets import load_wine
from sklearn.decomposition import PCA
# sklearn.decomposition provides the PCA transformer
datasets = load_wine()
x = datasets.data
y = datasets.target
print(x.shape, y.shape) #(178, 13) (178,)
'''
pca = PCA(n_components=10)
x2 = pca.fit_transform(x) # fit과 transform 합친 것
print(x2)
print(x2.shape) #(442, 7) 컬럼의 수가 재구성
pca_EVR = pca.explained_variance_ratio_ # 변화율
print(pca_EVR) #[0.40242142 0.14923182 0.12059623 0.09554764 0.06621856 0.06027192 0.05365605]
print(sum(pca_EVR))
# 7개 : 0.9479436357350414
# 8개 : 0.9913119559917797
# 9개 : 0.9991439470098977
# 10개 : 1.0
# 몇 개가 좋은지 어떻게 알까? 모델 돌려보면 알 수 있다. 통상적으로 95% 이면 모델에서 성능 비슷하게 나온다.
'''
# Fit PCA with all components to inspect the full variance spectrum.
pca = PCA()
pca.fit(x)
cumsum = np.cumsum(pca.explained_variance_ratio_)
# np.cumsum accumulates from the first element onward: it computes the
# cumulative sum of the array elements along the given axis, giving the
# cumulative explained-variance ratio per component count.
print(cumsum)
# [0.99809123 0.99982715 0.99992211 0.99997232 0.99998469 0.99999315
#  0.99999596 0.99999748 0.99999861 0.99999933 0.99999971 0.99999992
# 1.        ]
# Smallest number of components whose cumulative variance reaches 95%
# (argmax returns the first True; +1 converts index to a count).
d = np.argmax(cumsum>=0.95)+1
print('cumsum >=0.95', cumsum >=0.95)
print('d : ', d)
# cumsum >=0.95 [ True  True  True  True  True  True  True  True  True  True  True  True
#  True]
# d :  1
import matplotlib.pyplot as plt
plt.plot(cumsum)
plt.grid()
plt.show()
|
[
"[email protected]"
] | |
bc4ce015eb040a0bfe60106b3a22e8e043989877
|
ff182eeaf59b16f79b7d306eef72ddaadf0f4e71
|
/Vaffle_interface/testcase/SystemModule/System_test23_invite_get_score.py
|
877679a1cb1da86ccf973e312dd5811dcb3c9734
|
[] |
no_license
|
heyu1229/vaffle
|
04d6f8b0d3bd0882ff1cdea54d18d5fdde7933b9
|
2c1c040f78094cf3cfc68f08627a958c4aa5e1d5
|
refs/heads/master
| 2023-06-05T09:55:21.894344 | 2021-03-12T07:26:45 | 2021-03-12T07:26:45 | 381,248,658 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
# -*- coding:UTF-8 -*-
import unittest
import requests
import time,gc,sys
from Vaffle_interface.public_1.func_requests import FuncRequests
from Vaffle_interface.public_1.get_url import Url
class Invite_get_score(unittest.TestCase):
    """Interface test: inviting a user should be rewarded with points."""
    def setUp(self):
        # Fresh test-user uuid and request helper for every test case.
        self.member_uuid = Url ().test_user ()
        self.requests = FuncRequests ()
    #----------------- earn points by inviting --------------------------------
    def testcase_001(self):
        # sheet_index/row presumably address the request spec in a test-data
        # spreadsheet used by interface_requests_payload -- TODO confirm.
        sheet_index = 3
        row = 34
        print("testcase_001 反馈:")
        # NOTE(review): 'date' is computed but never used.
        date=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
        payload = {'member_uuid':self.member_uuid}
        result=self.requests.interface_requests_payload(self.member_uuid, sheet_index, row, payload)
        # 10000 is the API's success code for this endpoint.
        self.assertEqual(10000, result["code"])
        print("code返回值:10000")

if __name__=="__main__":
    unittest.main()
|
[
"[email protected]"
] | |
5f1f4ad717ccde42c1a45dcfb353c5a9f6f7a916
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_swagger/_public/modeldb/model/ModeldbCreateProjectResponse.py
|
b749fb1cab032e8cfae28f8b96a3aba11500069f
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628 | 2023-08-30T18:45:13 | 2023-08-30T18:45:13 | 71,305,435 | 844 | 142 |
Apache-2.0
| 2023-09-14T19:24:13 | 2016-10-19T01:07:26 |
Java
|
UTF-8
|
Python
| false | false | 616 |
py
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbCreateProjectResponse(BaseType):
  """Swagger-generated response wrapper for CreateProject: holds an
  optional 'project' payload (file is auto-generated; do not edit)."""
  def __init__(self, project=None):
    # No attribute is mandatory for this response type.
    required = {
      "project": False,
    }
    self.project = project
    # self[k] relies on item access presumably provided by BaseType --
    # TODO confirm against verta._swagger.base_type.
    for k, v in required.items():
      if self[k] is None and v:
        raise ValueError('attribute {} is required'.format(k))
  @staticmethod
  def from_json(d):
    """Build an instance from a decoded JSON dict, converting the nested
    'project' dict into a ModeldbProject."""
    from .ModeldbProject import ModeldbProject
    tmp = d.get('project', None)
    if tmp is not None:
      d['project'] = ModeldbProject.from_json(tmp)
    return ModeldbCreateProjectResponse(**d)
|
[
"[email protected]"
] | |
f026f41d97ad800e361e469b6d9b2f9ce747b465
|
325bee18d3a8b5de183118d02c480e562f6acba8
|
/pycan/pycan/spiders/listed_issuers_spider.py
|
5cbf6d2e6c7d9efd5de970ad5a60ec512b0647b2
|
[] |
no_license
|
waynecanfly/spiderItem
|
fc07af6921493fcfc21437c464c6433d247abad3
|
1960efaad0d995e83e8cf85e58e1db029e49fa56
|
refs/heads/master
| 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 |
Python
|
UTF-8
|
Python
| false | false | 7,315 |
py
|
"""从归档(MiG Archives)文件中提取公司列表"""
from io import BytesIO
from zipfile import BadZipFile
import scrapy
import pymysql
from scrapy import signals
from openpyxl import load_workbook
from dateutil.parser import parse as parse_datetime
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from twisted.internet.error import ConnectionRefusedError
from twisted.web._newclient import ResponseNeverReceived
from ..items import CompanyItem, ProfileDetailItem
class ListedIssuersSpider(scrapy.Spider):
    """Scrapy spider that extracts TSX/TSXV listed issuers from the MiG
    Archives workbook and yields CompanyItem / ProfileDetailItem records
    for companies not already present in the database."""
    name = 'listed_issuers'
    start_urls = [
        'https://www.tsx.com/listings/current-market-statistics/mig-archives'
    ]
    # Two alternative column layouts of the workbook: visible column
    # label -> CompanyItem field name.  The first matching layout wins.
    captions = [
        {
            'Exchange': 'exchange_market_code',
            'Name': 'name_en',
            'Root Ticker': 'security_code',
            'SP_Type': 'security_type',
            'Sector': 'sector_code',
            'Date of TSX Listing YYYYMMDD': 'ipo_date',
            'Place of Incorporation C=Canada U=USA F=Foreign': (
                'country_code_origin'
            )
        },
        {
            'Exchange': 'exchange_market_code',
            'Name': 'name_en',
            'Root Ticker': 'security_code',
            'Sector': 'sector_code',
            'Date of Listing': 'ipo_date'
        }
    ]
    # Workbook incorporation codes -> ISO country codes ('F' is unknown).
    countries = {
        'C': 'CAN',
        'U': 'USA',
        'F': None
    }
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook the spider_opened/spider_closed signals on creation."""
        spider = super(ListedIssuersSpider, cls).from_crawler(
            crawler, *args, **kwargs
        )
        crawler.signals.connect(spider.spider_opened, signals.spider_opened)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider
    def spider_opened(self, spider):
        """Load known Canadian companies from MySQL into self.companies
        and derive the highest used numeric company code."""
        self.logger.info('Opening spider %s...', spider.name)
        conn = pymysql.connect(**self.settings['DBARGS'])
        with conn.cursor() as cursor:
            cursor.execute("""\
select code, security_code, exchange_market_code, status from \
company where country_code_listed='CAN'\
""")
            records = cursor.fetchall()
        conn.close()
        # (exchange_market_code, security_code) -> (code, status)
        self.companies = {}
        for it in records:
            id_ = it['exchange_market_code'], it['security_code']
            self.companies[id_] = it['code'], it['status']
        if records:
            NUMBER = slice(3, None)  # numeric suffix of the company code
            self.max_code_num = int(max(it['code'] for it in records)[NUMBER])
        else:
            self.max_code_num = 10000
        # NOTE(review): total_new is reported in spider_closed but never
        # incremented anywhere in this file -- confirm intended behaviour.
        self.total_new = 0
    def spider_closed(self, spider):
        """Log how many new companies were discovered during the run."""
        self.logger.info(
            'Closing spider %s..., %d new', spider.name, self.total_new
        )
    def parse(self, response):
        """Find the listed-issuers workbook link on the archives page and
        follow it (the second <a> inside the matching row)."""
        try:
            doc_href = response.xpath(
                "//a[text()='TSX/TSXV Listed Issuers']/..//a/@href"
            ).extract()[1]
            yield response.follow(
                doc_href,
                callback=self.parse_listed_issuers,
                errback=self.errback_scraping
            )
        except IndexError:
            self.logger.error("Can't find listed issuers info")
    def parse_listed_issuers(self, response):
        """Parse the xlsx workbook: map columns through self.captions,
        then yield items for companies not seen in the database."""
        try:
            wb = load_workbook(BytesIO(response.body), read_only=True)
            # Row 7 holds column labels; data starts at row 8.
            labels_row, start_row = 7, 8
            for ws in wb.worksheets:
                labels = [
                    cell.value.replace('\n', ' ') for cell in ws[labels_row]
                    if isinstance(cell.value, str)
                ]
                # Profile field names derived from the labels.
                names = [
                    it.replace(' ', '_').lower() + '_mig_can' for it in labels]
                for each in self.captions:
                    if set(each.keys()).issubset(set(labels)):
                        # column position -> CompanyItem field
                        indexes = {
                            labels.index(it): each[it] for it in each
                        }
                        for row in ws.iter_rows(min_row=start_row):
                            item = CompanyItem()
                            profiles = []
                            for index, cell in enumerate(row):
                                if cell.value:
                                    try:
                                        item[indexes[index]] = cell.value
                                    except KeyError:
                                        # Unmapped columns become profile
                                        # detail records instead.
                                        profiles.append(
                                            ProfileDetailItem(
                                                name=names[index],
                                                display_label=labels[index],
                                                value=cell.value,
                                                data_type='string'
                                            )
                                        )
                            try:
                                item['country_code_origin'] = self.countries[
                                    item['country_code_origin']
                                ]
                            except KeyError:
                                pass
                            company = (
                                item['exchange_market_code'],
                                item['security_code']
                            )
                            # Only unseen companies get a new code + items.
                            if company not in self.companies:
                                self.max_code_num += 1
                                item['code'] = 'CAN' + str(self.max_code_num)
                                item['name_origin'] = item['name_en']
                                if 'ipo_date' in item:
                                    item['ipo_date'] = parse_datetime(
                                        str(item['ipo_date']))
                                self.companies[company] = (item['code'], None)
                                for p_item in profiles:
                                    p_item['company_code'] = item['code']
                                    yield p_item
                                yield item
                        break
                else:
                    self.logger.error(
                        'Failed finding captions for listed issuers')
        except BadZipFile:
            self.logger.error(
                'Listed issuers may redirect to %s', response.url)
    def errback_scraping(self, failure):
        """Log the specific Twisted/Scrapy failure type for the request."""
        req_url = failure.request.url
        if failure.check(HttpError):
            response = failure.value.response
            self.logger.error('HttpError %s on %s', response.status, req_url)
        elif failure.check(DNSLookupError):
            self.logger.error('DNSLookupError on %s', req_url)
        elif failure.check(ConnectionRefusedError):
            self.logger.error('ConnectionRefusedError on %s', req_url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            self.logger.error('TimeoutError on %s', req_url)
        elif failure.check(ResponseNeverReceived):
            self.logger.error('ResponseNeverReceived on %s', req_url)
        else:
            # NOTE(review): 'UnpectedError' is a typo ('UnexpectedError');
            # left unchanged because it is a runtime log string.
            self.logger.error('UnpectedError on %s', req_url)
            self.logger.error(repr(failure))
|
[
"[email protected]"
] | |
bfb478f20e11de16e5810f8d08fa62eb3da131f8
|
f48a3d354bf4bbbe3d47651dd77853c29934f1fe
|
/Code/Finance/Code/Udemy_AlgoTrading/51_max_dd_calmar.py
|
0639e3adacef6a05456f219fb7c4fdc80ad8f7fa
|
[
"MIT"
] |
permissive
|
guidefreitas/TeachingDataScience
|
0677df459d5a13c00404b8b04cbe3b389dae3d8b
|
f3e0bc6e391348a8065b09855ab82c436f82a4b5
|
refs/heads/master
| 2023-09-03T14:02:11.853103 | 2021-11-07T03:56:54 | 2021-11-07T03:56:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,457 |
py
|
# =============================================================================
# Measuring the performance of a buy and hold strategy - Max drawdown & calmar ratio
# Author : Mayank Rasu (http://rasuquant.com/wp/)
# Please report bug/issues in the Q&A section
# =============================================================================
# Import necesary libraries
import yfinance as yf
import numpy as np
import datetime as dt
# Download historical data for required stocks
ticker = "^GSPC"
SnP = yf.download(ticker,dt.date.today()-dt.timedelta(1825),dt.datetime.today())
def CAGR(DF):
    """Return the Cumulative Annual Growth Rate of a buy-and-hold strategy.

    DF must contain an "Adj Close" column of (presumably daily) prices;
    a 252-trading-day year is assumed — TODO confirm for the data feed.
    """
    df = DF.copy()
    # Work on the local copy so the caller's frame is never mutated.
    df["daily_ret"] = df["Adj Close"].pct_change()
    df["cum_return"] = (1 + df["daily_ret"]).cumprod()
    n = len(df) / 252  # number of years covered by the sample
    # Bug fix: df["cum_return"][-1] is a *label* lookup, which raises on the
    # DatetimeIndex yfinance returns under modern pandas; .iloc[-1] is the
    # intended positional access to the final cumulative return.
    return (df["cum_return"].iloc[-1]) ** (1 / n) - 1
def max_dd(DF):
    """Return the maximum drawdown (largest peak-to-trough fall, as a
    fraction of the peak) of the "Adj Close" price series in DF."""
    data = DF.copy()
    data["daily_ret"] = data["Adj Close"].pct_change()
    data["cum_return"] = (1 + data["daily_ret"]).cumprod()
    # Running peak of the cumulative return curve.
    data["cum_roll_max"] = data["cum_return"].cummax()
    # Absolute fall from the running peak, then normalised by that peak.
    data["drawdown"] = data["cum_roll_max"] - data["cum_return"]
    data["drawdown_pct"] = data["drawdown"] / data["cum_roll_max"]
    return data["drawdown_pct"].max()
print(max_dd(SnP))
def calmar(DF):
    """Return the Calmar ratio: annualised growth divided by max drawdown."""
    frame = DF.copy()
    return CAGR(frame) / max_dd(frame)
print(calmar(SnP))
|
[
"[email protected]"
] | |
34c324f9bfe464ec5dec8508c846a30409c79e34
|
46c521a85f567c609f8a073cb9569ea59e2a104f
|
/kunalProgram23.py
|
bab9fcf53fa8ff6e595f560e1fd900d0d4fa40d5
|
[] |
no_license
|
Kunal352000/python_adv
|
c046b6b785b52eaaf8d089988d4dadf0a25fa8cb
|
d9736b6377ae2d486854f93906a6bf5bc4e45a98
|
refs/heads/main
| 2023-07-15T23:48:27.131948 | 2021-08-21T13:16:42 | 2021-08-21T13:16:42 | 398,557,910 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 174 |
py
|
# Read the height, then print a centred number diamond: rows 1..n counting
# up (each row repeats its own number), followed by rows n-1..1 counting down.
n = int(input("Enter number of rows: "))
for row in range(1, n + 1):
    print(" " * (n - row) + (str(row) + ' ') * row)
for row in range(n - 1, 0, -1):
    print(" " * (n - row) + (str(row) + ' ') * row)
|
[
"[email protected]"
] | |
ae9c18ed187a23fb7121a34ca1422020e3f7ddb5
|
20343e8a8435b3f839d5abd0c4063cf735f43341
|
/Experiment/CornerDetectAndAutoEmail/AveMaxMinDetect/test/test1.py
|
c4133dfbbcbb80c2cf4a4201fd7522e512561360
|
[] |
no_license
|
alading241/MoDeng
|
948f2099e2f7e4548d6e477b6e06b833bdf4f9bb
|
01819e58943d7d1a414714d64aa531c0e99dfe22
|
refs/heads/master
| 2021-05-23T11:39:41.326804 | 2020-04-05T06:06:01 | 2020-04-05T06:06:01 | 253,269,397 | 1 | 0 | null | 2020-04-05T15:38:33 | 2020-04-05T15:38:33 | null |
UTF-8
|
Python
| false | false | 476 |
py
|
# encoding = utf-8
import tornado
from apscheduler.schedulers.tornado import TornadoScheduler

# Demo: schedule two jobs on a Tornado-backed APScheduler at 1-second
# intervals — one receives positional args, the other keyword args — then
# start the IOLoop, which blocks forever.
sched = TornadoScheduler()

""" 测试向任务中传入参数 """  # (translation: test passing parameters into scheduled jobs)
test = 'hello'


def job1(a, b, c):
    # Receives the positional args=["e", "t", "f"] registered below.
    print("job1:", a,b,c)


def job2(a, b, c):
    # Receives the kwargs dict registered below (a is the module-level `test`).
    print("job2:", a,b,c)


sched.add_job(job1, 'interval', seconds=1, args=["e", "t", "f"])
sched.add_job(job2, 'interval', seconds=1, kwargs={"a": test, "b": "b", "c": "c"})
sched.start()
tornado.ioloop.IOLoop.instance().start()
|
[
"[email protected]"
] | |
513143cc032bfe7bf0b443158e43a0ef5e19b9c4
|
68f757e7be32235c73e316888ee65a41c48ecd4e
|
/python_book(이것이 코딩테스트다)/13 DFS/6 감시피하기.py
|
2ae13a528d691db24f99e98aa727d9f3e6b9279b
|
[] |
no_license
|
leejongcheal/algorithm_python
|
b346fcdbe9b1fdee33f689477f983a63cf1557dc
|
f5d9bc468cab8de07b9853c97c3db983e6965d8f
|
refs/heads/master
| 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
def check(Map):
    """Return 1 if no teacher ("T") has line of sight to any student ("S")
    on the N x N board ``Map``; otherwise return 0.

    Sight travels in the four axis directions and is blocked by obstacles
    ("O").  Board size N and teacher coordinates T come from module globals.
    """
    global N, T
    directions = ((1, 0), (-1, 0), (0, 1), (0, -1))
    for tx, ty in T:
        for dx, dy in directions:
            cx, cy = tx + dx, ty + dy
            while 0 <= cx < N and 0 <= cy < N:
                cell = Map[cx][cy]
                if cell == "O":
                    break          # obstacle blocks this line of sight
                if cell == "S":
                    return 0       # a student is visible
                cx, cy = cx + dx, cy + dy
    return 1
def dfs(x, y):
    # Depth-first search placing exactly 3 extra obstacles ("O") on empty
    # cells ("X"), scanning cells strictly after (x, y) in row-major order so
    # each combination is tried once.  Uses module globals: L (board),
    # N (size), flag (1 once a valid placement is found), count (obstacles
    # placed so far).
    global L, N, flag, count
    if flag == 1:
        # A solution was already found — prune the remaining search.
        return
    if count == 3:
        # All three obstacles placed: test whether every student is hidden.
        if check(L):
            flag = 1
        return
    for i in range(N):
        for j in range(N):
            # Consider only cells after (x, y) in row-major order.
            if x < i or (j > y and x == i):
                if L[i][j] == "X":
                    L[i][j] = "O"   # place an obstacle ...
                    count += 1
                    dfs(i, j)
                    L[i][j] = "X"   # ... and backtrack
                    count -= 1
    return
N = int(input())
L = [list(input().split()) for _ in range(N)]
T = []
flag = 0
count = 0
for i in range(N):
for j in range(N):
if L[i][j] == "T":
T.append((i,j))
dfs(-1, -1)
if flag == 1:
print("YES")
else:
print("NO")
|
[
"[email protected]"
] | |
4009361d3230b25bd783c8a62243914aa44d83e8
|
dad6ba45f05d267f6c44bd27949868dc474476e6
|
/CQT/Archive/withoutHead.py
|
b880268f61562e2aaff0d40889c0d47085412c3c
|
[] |
no_license
|
morindaz/CQT_All
|
1f36c5ef22348e2293d9f4f63e58009f0dd274b7
|
8ab6f82ad7b1cf3b4555fe785566013f5cb57a4f
|
refs/heads/master
| 2021-04-27T00:11:17.744327 | 2018-03-04T07:19:06 | 2018-03-04T07:19:06 | 123,765,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 842 |
py
|
# coding=utf-8
import time
import glob
import pandas as pd

# NOTE(original): the C data and the R data are swapped here.
answerR = "E:\\pingan\\dataset\\newFeature\\answer_C" #492个数据,withouthead
answerC = "E:\\pingan\\dataset\\newFeature\\answer_R" #244个数据
answerI = "E:\\pingan\\dataset\\newFeature\\answer_I" #478个数据

base = answerI
csvx_list = glob.glob(base+"\\"+'*.csv')
print('总共发现%s个CSV文件'% len(csvx_list))
time.sleep(2)
print('正在处理............')
# Bug fix: DataFrame.append was removed in pandas 2.0 (and was O(n^2));
# read every CSV into a list and concatenate once instead.
frames = [pd.read_csv(path, sep=',', header=0) for path in csvx_list]
df = pd.concat(frames) if frames else pd.DataFrame()
print('写入成功!')
output_Archive = pd.DataFrame(df)
output_Archive.to_csv("base"+'.csv')
print('写入完毕!')
print('3秒钟自动关闭程序!')
time.sleep(3)
|
[
"[email protected]"
] | |
8845d190ae1c3cf32b9e4665e81304da16378cc6
|
56b7e5ed6941fc4b83148e00bd51421dc3ac993a
|
/hackerrank/Delete Nodes Greater Than X/Delete Nodes Greater Than X.py
|
6c8ef8211533ee3666850ccdc29c91e09917498c
|
[] |
no_license
|
samir-0711/Leetcode-Python
|
f960e15015a3f2fd88f723d7f9237945a7133553
|
d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c
|
refs/heads/master
| 2022-12-18T05:27:48.224001 | 2020-09-30T21:03:42 | 2020-09-30T21:03:42 | 300,061,318 | 0 | 0 | null | 2020-09-30T20:59:42 | 2020-09-30T20:59:42 | null |
UTF-8
|
Python
| false | false | 1,590 |
py
|
'''
Complete the removeNodes function provided in your editor. It has 2 parameters:
1. list: A reference to a Linked List Node that is the head of a linked list.
2. x: An integer value.
Your funciton should remove all nodes from the list having data values greater than x, and then return the head of the modified linked list.
Input Format
The locked stub code in your editer processes the following inputs and pased the necessary arguments to the removeNodes function:
The first line contains N, the number of nodes in the linked list.
Each line i (where 0<= i <) of the N subsequent lines contains an integer representing the value of a node in the linked list. The last line contains an integer, x.
Output Format
Return the linked list after removing the nodes containing values > x.
Sample Input 1:
5
1
2
3
4
5
3
Sample Output 1:
1
2
3
Sample Input 2:
5
5
2
1
6
7
5
Sample Output2:
5
2
1
'''
class LinkedListNode:
    # Singly linked list node: one value plus a pointer to the next node.
    def __init__(self, node_Value):
        self.val = node_Value
        self.next = None  # set when the node is linked into a list
def _insert_node_into_singlylinkedlist(head, tail, val):
    """Append a node holding ``val`` and return the (possibly new) head and tail.

    Bug fix: the original rebound the *local* names ``head``/``tail`` and
    returned nothing, so the caller never observed the inserted node.
    Returning the pair lets callers write
    ``head, tail = _insert_node_into_singlylinkedlist(head, tail, val)``;
    callers that ignored the (previously None) return value still work.
    """
    if head is None:
        # Empty list: the new node is both head and tail.
        head = LinkedListNode(val)
        tail = head
    else:
        node = LinkedListNode(val)
        tail.next = node
        tail = node
    return head, tail
def removeNodes(list, x):
    """Remove every node whose ``val`` exceeds ``x``; return the new head.

    Returns None for a None list/x, or when *all* nodes exceed ``x``.

    Bug fix: when every node's value exceeded ``x``, the original head-skip
    loop walked past the end of the list and raised AttributeError on
    ``None.val``; the loop now stops at the end of the list.
    """
    if list is None or x is None:
        return None
    # Skip leading nodes that must be removed, to find the new head.
    temp = list
    while temp is not None and temp.val > x:
        temp = temp.next
    # Single pass unlinking any remaining too-large nodes.
    curr = temp
    prev = None
    while curr is not None:
        if curr.val > x:
            prev.next = curr.next  # prev is never None here: temp.val <= x
        else:
            prev = curr
        curr = curr.next
    return temp
|
[
"[email protected]"
] | |
2c72c0fd9afadc5369dafc83a72510e88f785872
|
d70a16f353819ff858dbe6974916a936a85a3c0e
|
/api/migrations/0003_auto_20201217_1941.py
|
60ba065342ebb9755ec4749f75c6cf4cc1ac6880
|
[] |
no_license
|
mahmud-sajib/FBuzz-Task
|
7fa69a35d1dfe069ed48e2956d1eff16cf953c74
|
a57bc031911fd7259c68890a953d9d8175246f73
|
refs/heads/master
| 2023-02-02T05:56:01.208849 | 2020-12-19T07:03:24 | 2020-12-19T07:03:24 | 321,357,770 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
# Generated by Django 3.1 on 2020-12-17 13:41
import api.validators
from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated migration: re-declares CVFileUpload.document as a
    # FileField stored under documents/ with a custom size validator.

    dependencies = [
        ('api', '0002_auto_20201217_1751'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cvfileupload',
            name='document',
            field=models.FileField(upload_to='documents/', validators=[api.validators.validate_file_size]),
        ),
    ]
|
[
"[email protected]"
] | |
85f8207c1a52da4c91cfcc22bb76bd8dd60589aa
|
40fa413a9ba362ab8cc2474269f83bb87847cda2
|
/setup.py
|
a7a9aee54a8f3d08813d67be79e79b61855eaffc
|
[] |
no_license
|
Peder2911/leanfeeder
|
c366563527c6e6b65cf46f8564596d1637337026
|
f50ed3845aac21b6eed81eb1ef72c39175c87c8d
|
refs/heads/master
| 2023-01-01T13:55:49.037014 | 2020-10-15T12:02:43 | 2020-10-15T12:02:43 | 301,992,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 765 |
py
|
import setuptools

# Pull the long description from the README shipped next to this file.
with open("README.md") as f:
    long_description = f.read()

setuptools.setup(
    name="leanfeeder",
    version="0.0.1",
    author="Peder G. Landsverk",
    author_email="[email protected]",
    description="Tool for pushing data to a Postgres DB without too much hassle.",
    long_description=long_description,
    # Bug fix: was "test/markdown", which PyPI rejects as an invalid
    # description content type; the correct MIME type is "text/markdown".
    long_description_content_type="text/markdown",
    url="https://www.github.com/peder2911/leanfeeder",
    packages=setuptools.find_packages(),
    scripts=["bin/leanf"],
    python_requires=">=3.7",
    install_requires=[
        "strconv>=0.4.0",
        "psycopg2>=2.8.0",
        "fire>=0.3.0",
        "python-dateutil>=2.8.0",
    ])
|
[
"[email protected]"
] | |
18c298b110833ad5b1860e533427d320c882c52d
|
cca5ceb42b09e567d79fcb46f298757c1ff04447
|
/ObjectOriented/DataStructure.py
|
45fb0eea3cc51bded3a1c13868377b5c1980c3d7
|
[] |
no_license
|
NishantGhanate/PythonScripts
|
92933237720e624a0f672729743a98557bea79d6
|
60b92984d21394002c0d3920bc448c698e0402ca
|
refs/heads/master
| 2022-12-13T11:56:14.442286 | 2022-11-18T14:26:33 | 2022-11-18T14:26:33 | 132,910,530 | 25 | 15 | null | 2022-12-09T09:03:58 | 2018-05-10T14:18:33 |
Python
|
UTF-8
|
Python
| false | false | 1,468 |
py
|
class Que:
    """Front-loaded queue over a plain list.

    ``push`` inserts at index 0 and ``pop`` removes from index 0; both echo
    the current contents (or "Empty Que" when popping an empty queue).
    """

    def __init__(self, contents):
        # Copy the iterable so the caller's sequence is never shared.
        self._hiddenlist = list(contents)

    def push(self, value):
        self._hiddenlist.insert(0, value)
        print(self._hiddenlist)

    def pop(self):
        if self._hiddenlist:
            self._hiddenlist.pop(0)
            print(self._hiddenlist)
        else:
            print("Empty Que")
que = Que([1, 2.25, 3.0, 4, 1234.5])
que.push(0)
que.pop()
class Node:
    # Singly linked list node used by Slink below.
    def __init__(self, dataValue):
        self.dataValue = dataValue
        self.nextValue = None  # set when the node is linked in
class Slink:
    """Minimal singly linked list: head insertion plus traversal printing."""

    def __init__(self):
        self.headValue = None  # empty list

    def printLink(self):
        # Walk from the head, printing one value per line.
        node = self.headValue
        while node is not None:
            print(node.dataValue)
            node = node.nextValue

    def atStart(self, newData):
        # Prepend: the fresh node becomes the head, pointing at the old head.
        fresh = Node(newData)
        fresh.nextValue = self.headValue
        self.headValue = fresh
# lis = Slink()
# lis.atStart("Sun")
# lis.atStart("Mon")
# lis.printLink()
class Stack:
    """LIFO stack backed by a list, seeded with the value 10."""

    def __init__(self):
        # NOTE(review): the integer seed 10 is kept for compatibility with
        # the original; it is popped like any other element.
        self.stack = [10]

    def push(self, dataValue):
        # Append on top and return the whole backing list (original contract).
        self.stack.append(dataValue)
        return self.stack

    def pop(self):
        if len(self.stack) <= 0:
            return ("No element in the Stack")
        else:
            # Bug fix: the original concatenated the popped element directly,
            # which raised TypeError for non-string items (e.g. the seed 10).
            return "This value pop =" + str(self.stack.pop())
# stack = Stack()
# stack.push("1")
# stack.push("2")
# stack.push("3")
# print(stack.pop())
# print(stack.push("5"))
|
[
"[email protected]"
] | |
0c7ca209f5437826a7c1527f09d0f27b55e5d412
|
ac2f4c7caaf7ccc51ebcb2d88020fb4842b3f493
|
/install.py
|
e20927c124212f7697dea81c1e707305db393903
|
[] |
no_license
|
vaikuntht/TAMU-Latex-Styles
|
48b89291cb5b65348303cfee4bc8424a61b44adb
|
8c1f096bbe3140eef6e14d001fa9d81905a28258
|
refs/heads/master
| 2021-01-18T00:04:04.722714 | 2014-07-31T18:49:42 | 2014-07-31T18:49:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,686 |
py
|
#!/usr/bin/env python
import os, sys, getopt, argparse, fnmatch, errno, subprocess, tempfile, platform, getpass, pprint, shutil
from subprocess import call
#program name available through the %(prog)s command
#can use prog="" in the ArgumentParser constructor
#can use the type=int option to make the parameters integers
#can use the action='append' option to make a list of options
#can use the default="" option to automatically set a parameter
parser = argparse.ArgumentParser(description="Install the TAMU based LaTex style files.",
epilog="And those are the options available. Deal with it.")
group = parser.add_mutually_exclusive_group()
parser.add_argument("-nha","--nohash", help="Will run texhash command once the files are copied",
action="store_false")
group.add_argument("-q", "--quiet", help="decrease output verbosity to minimal amount",
action="store_true")
group.add_argument("-v", "--verbose", help="Increase output verbosity of lcg-cp (-v) or srm (-debug) commands",
action="store_true")
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument("-y", "--texlive_year", help="The texlive distribution year",
default="2014")
args = parser.parse_args()
if(args.verbose):
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
print "Argument ", args, "\n"
QUIET = args.quiet
VERBOSE = args.verbose
DOHASH = args.nohash
TEXLIVE_YEAR = args.texlive_year
theme_path = ""
color_path = ""
Outer_path = ""
def check_linux_folders():
    # Set the packaged-TeX-Live beamer theme/color/outer directories for
    # Linux (published via module globals) and exit if any is missing.
    global theme_path
    global color_path
    global outer_path
    theme_path = "/usr/share/texmf/tex/latex/beamer/base/themes/theme/"
    color_path = "/usr/share/texmf/tex/latex/beamer/base/themes/color/"
    outer_path = "/usr/share/texmf/tex/latex/beamer/base/themes/outer/"
    # To check if it is a directory (and it exists) use os.path.isdir
    # To check if something exists (directory, file, or otherwise), use os.path.exists
    theme = os.path.isdir(theme_path)
    color = os.path.isdir(color_path)
    outer = os.path.isdir(outer_path)
    if not QUIET: print "Themes exists? " + str(theme)
    if not QUIET: print "Color themes exists? " + str(color)
    if not QUIET: print "Outer themes exists? " + str(outer)
    if not theme:
        print "ERROR::The path to the beamer themes ("+str(theme_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not color:
        print "ERROR::The path to the beamer colors ("+str(color_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not outer:
        print "ERROR::The path to the beamer outer themes ("+str(outer_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
def check_osx_folders():
    # Same as check_linux_folders, but for a MacTeX-style install under
    # /usr/local/texlive/<year>; the year comes from --texlive_year.
    global theme_path
    global color_path
    global outer_path
    theme_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/theme/"
    color_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/color/"
    outer_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/outer/"
    theme = os.path.isdir(theme_path)
    color = os.path.isdir(color_path)
    outer = os.path.isdir(outer_path)
    if not QUIET: print "Themes exists? " + str(theme)
    if not QUIET: print "Color themes exists? " + str(color)
    if not QUIET: print "Outer themes exists? " + str(outer)
    if not theme:
        print "ERROR::The path to the beamer themes ("+str(theme_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not color:
        print "ERROR::The path to the beamer colors ("+str(color_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not outer:
        print "ERROR::The path to the beamer outer themes ("+str(outer_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
def privledge_check():
    # Abort unless running as root: the style files are copied into
    # system-wide texmf directories.  ("privledge" is the original's typo,
    # kept so callers keep working.)
    user = getpass.getuser()
    if not QUIET: print "User = " + str(user)
    if user != 'root':
        print "Sorry, you are not \"root\" and do not have enough privledges to continue."
        sys.exit()
def run_checks():
    # Verify root privileges, detect the OS, and confirm the platform's
    # beamer theme directories exist; exits the process on any failure.
    print "************************************"
    print "* Running checks on the system ... *"
    print "************************************"
    privledge_check()
    kernel = platform.system()
    OS = ""
    flavor = ""
    version = ""
    if kernel == 'Linux':
        OS = "Linux"
        flavor = platform.linux_distribution()[0]
        version = platform.linux_distribution()[1]
        if not QUIET: print str(flavor) + "(" + str(OS) + ")" + str(version)
        check_linux_folders()
    elif kernel == 'Darwin':
        OS = "OSX"
        flavor = "Unknown"
        version = platform.mac_ver()[0]
        if not QUIET: print str(OS) + " " + str(version)
        check_osx_folders()
    else:
        print "ERROR::Unknown OS. Cannot confirm that installation will be successful. Process will not continue."
        sys.exit()
    print
def copy_set_of_files(dict, folder):
    # For each entry of *dict* — a tuple whose first element is the
    # destination directory and whose remaining elements are source file
    # names — copy folder/<src> to the destination with metadata (copy2).
    # NOTE(review): the parameter name "dict" shadows the builtin; kept as-is.
    for dst in dict:
        if not QUIET: print "Doing folder " + str(dst) + " ... "
        for f in range(1,len(dict[dst])):
            src = dict[dst][f]
            dest = dict[dst][0]
            if not QUIET: print "\tCopying " + str(folder) + str(src) + " to " + str(dest) + " ... ",
            shutil.copy2(folder+src,dest)
            if not QUIET: print "DONE"
def copy_files():
    # Copy the TAMU beamer theme/color/outer style files and logo images
    # from the local Beamer/ directory into the system texmf tree
    # (destinations were resolved earlier by run_checks()).
    print "**********************************************"
    print "* Copying the files to the correct paths ... *"
    print "**********************************************"
    copyfileBeamerDict = {
        'theme' : (theme_path, "beamerthemeTAMU.sty"),
        'color' : (color_path, "beamercolorthemetamu.sty", "beamercolorthemetamubox.sty"),
        'outer' : (outer_path, "beamerouterthemeshadowTAMU.sty", "beamerouterthemesplittamu.sty", "UniversityLogos/beamerouterthemeTAMULogoBox.png", "ExperimentLogos/beamerouterthemeCMS.png","ExperimentLogos/beamerouterthemeCDF.png","LaboritoryLogos/beamerouterthemeCERN.png","LaboritoryLogos/beamerouterthemeFNAL.png")
    }
    if VERBOSE and not QUIET:
        print "Dictionary"
        print "----------"
        pprint.pprint(copyfileBeamerDict)
        print
    copy_set_of_files(copyfileBeamerDict, "Beamer/")
    print
def do_tex_hash():
    # Rebuild the TeX filename database so the freshly copied styles are
    # discoverable by LaTeX.
    print "***********************"
    print "* Running texhash ... *"
    print "***********************"
    os.system("texhash")
# Script entry sequence: validate the environment, install the files, then
# optionally refresh the TeX hash database (suppressed by --nohash).
run_checks()
copy_files()
if DOHASH:
    do_tex_hash()
|
[
"[email protected]"
] | |
fb708659d8576b28acdb88c0439ca493e36c5884
|
30c524146ac7c240b3f69a856a12f9d971e2f294
|
/setup.py
|
a7138c22975f00b0e0c89fc5a9121d3aa768c383
|
[
"MIT"
] |
permissive
|
undercertainty/ipython_magic_sqlalchemy_schemadisplay
|
7da1400b4b9cff520b3e185345c204f14ccb512d
|
bc22060f3125736eecf2cc4d7972eca9715fc0c3
|
refs/heads/master
| 2021-10-10T07:00:04.925288 | 2019-01-07T23:01:31 | 2019-01-07T23:01:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
from setuptools import setup

# Packaging metadata for the schemadisplay IPython magic.
# NOTE(review): dependency_links is ignored by modern pip — verify the
# git-hosted sqlalchemy_schemadisplay dependency still resolves.
setup(name='schemadisplay-magic',
      packages=['schemadisplay_magic'],
      install_requires=['ipython-sql', 'sqlalchemy_schemadisplay', 'graphviz'],
      dependency_links=['git+https://github.com/fschulze/sqlalchemy_schemadisplay.git']
      )
|
[
"[email protected]"
] | |
f12ecdec195d21b07ddb86e45226d52b6dbaf079
|
a5c2f4ada2fb4436784a785a5d598546d3b3284c
|
/Main/migrations/0001_initial.py
|
305f99beddc1606764e5d5472f2e0f219b5ffacf
|
[] |
no_license
|
sakibovi123/chat_applicationv1.0.1
|
1c5d25c1229434b4c6019fcf4dbabf53324d90df
|
7b5db530e22743959df215347ff1e644cbbfb4e0
|
refs/heads/master
| 2023-07-13T22:22:02.295141 | 2021-08-28T07:35:27 | 2021-08-28T07:35:27 | 396,916,167 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 778 |
py
|
# Generated by Django 3.2.6 on 2021-08-16 11:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Initial migration: creates the ChatRoom table (slug-addressed room,
    # free-text message, FK to the project's configured user model).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ChatRoom',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('message', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
1988f3bfa396617797d1effd273ed01d83a05ec9
|
92acb2bdfcdb594a7f98b24093f4711879e956ca
|
/dvaapp/admin.py
|
7be6bf0aa9a7a506025392cfac7e62ea6530b6cf
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
cynwpu/DeepVideoAnalytics
|
e1f0b2e00a2671014bdcae99bf11c180bf35a30e
|
c95913a2967d6d17e71bb1b703f99c00c483bcdc
|
refs/heads/master
| 2021-05-05T15:04:50.650488 | 2017-09-10T20:01:31 | 2017-09-10T20:01:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,361 |
py
|
from django.contrib import admin
from .models import Video, Frame, TEvent, IndexEntries, QueryResults, DVAPQL, VDNServer,\
    LOPQCodes, Region, Tube, Detector, Segment, DeletedVideo, \
    VideoLabel, FrameLabel, RegionLabel, TubeLabel, SegmentLabel, Label, ManagementAction, \
    StoredDVAPQL, Analyzer, Indexer, Retriever, SystemState, Worker


# Boilerplate Django admin registrations: every model gets a default
# ModelAdmin with no customisation.

@admin.register(SystemState)
class SystemStateAdmin(admin.ModelAdmin):
    pass

@admin.register(Worker)
class WorkerAdmin(admin.ModelAdmin):
    pass

@admin.register(Label)
class LabelAdmin(admin.ModelAdmin):
    pass

@admin.register(VideoLabel)
class VideoLabelAdmin(admin.ModelAdmin):
    pass

@admin.register(FrameLabel)
class FrameLabelAdmin(admin.ModelAdmin):
    pass

@admin.register(SegmentLabel)
class SegmentLabelAdmin(admin.ModelAdmin):
    pass

@admin.register(RegionLabel)
class RegionLabelAdmin(admin.ModelAdmin):
    pass

@admin.register(TubeLabel)
class TubeLabelAdmin(admin.ModelAdmin):
    pass

@admin.register(Segment)
class SegmentAdmin(admin.ModelAdmin):
    pass

@admin.register(Region)
class RegionAdmin(admin.ModelAdmin):
    pass

@admin.register(Video)
class VideoAdmin(admin.ModelAdmin):
    pass

@admin.register(DeletedVideo)
class DeletedVideoAdmin(admin.ModelAdmin):
    pass

@admin.register(QueryResults)
class QueryResultsAdmin(admin.ModelAdmin):
    pass

@admin.register(DVAPQL)
class DVAPQLAdmin(admin.ModelAdmin):
    pass

@admin.register(Frame)
class FrameAdmin(admin.ModelAdmin):
    pass

@admin.register(IndexEntries)
class IndexEntriesAdmin(admin.ModelAdmin):
    pass

@admin.register(VDNServer)
class VDNServerAdmin(admin.ModelAdmin):
    pass

@admin.register(TEvent)
class TEventAdmin(admin.ModelAdmin):
    pass

@admin.register(LOPQCodes)
class LOPQCodesAdmin(admin.ModelAdmin):
    pass

@admin.register(Tube)
class TubeAdmin(admin.ModelAdmin):
    pass

@admin.register(Detector)
class DetectorAdmin(admin.ModelAdmin):
    pass

@admin.register(Analyzer)
class AnalyzerAdmin(admin.ModelAdmin):
    pass

@admin.register(Indexer)
class IndexerAdmin(admin.ModelAdmin):
    pass

@admin.register(Retriever)
class RetrieverAdmin(admin.ModelAdmin):
    pass

@admin.register(ManagementAction)
class ManagementActionAdmin(admin.ModelAdmin):
    pass

@admin.register(StoredDVAPQL)
class StoredDVAPQLAdmin(admin.ModelAdmin):
    pass
|
[
"[email protected]"
] | |
8810e80afe9d5667581d1c646a07dad52c3242c2
|
131ccf66fb787e9b1f0773a25fa518d1f2a3c5d0
|
/gui_programming/guimaker.py
|
f88dcbdb765fc650380d10a48a44bdb26e259768
|
[] |
no_license
|
jocogum10/learning-python-programming
|
a0ba62abde49fd79762bcb7ba4a94bf8126afa77
|
035858bd332e3970d95db8bce7b1175e450802db
|
refs/heads/master
| 2020-07-07T17:08:00.743196 | 2019-12-13T05:32:47 | 2019-12-13T05:32:47 | 203,416,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,637 |
py
|
"""
################################################################################
An extended Frame that makes window menus and toolbars automatically.
Use GuiMakerMenu for embedded components (makes frame-based menus).
Use GuiMakerWindowMenu for top-level windows (makes Tk8.0 window menus).
See the self-test code (and PyEdit) for an example layout tree format.
################################################################################
"""
import sys
from tkinter import * # widget classes
from tkinter.messagebox import showinfo
class GuiMaker(Frame):
    # Extended Frame that builds a menu bar and toolbar from the declarative
    # menuBar/toolBar lists; subclasses override start() to populate those
    # lists and makeWidgets() to fill the middle of the window.
    menuBar = []          # class defaults
    toolBar = []          # change per instance in subclasses
    helpButton = True     # set these in start() if need self

    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack(expand=YES, fill=BOTH)  # make frame stretchable
        self.start()          # for subclass: set menu/toolBar
        self.makeMenuBar()    # done here: build menu bar
        self.makeToolBar()    # done here: build toolbar
        self.makeWidgets()    # for subclass: add middle part

    def makeMenuBar(self):
        """
        make menu bar at the top (Tk8.0 menus below)
        expand=no, fill=x so same width on resize
        """
        menubar = Frame(self, relief=RAISED, bd=2)
        menubar.pack(side=TOP, fill=X)
        for (name, key, items) in self.menuBar:
            mbutton = Menubutton(menubar, text=name, underline=key)
            mbutton.pack(side=LEFT)
            pulldown = Menu(mbutton)
            self.addMenuItems(pulldown, items)
            mbutton.config(menu=pulldown)
        if self.helpButton:
            Button(menubar, text = 'Help',
                   cursor = 'gumby',
                   relief = FLAT,
                   command = self.help).pack(side=RIGHT)

    def addMenuItems(self, menu, items):
        # Recursively add entries: 'separator' strings, disabled-index lists,
        # (label, underline, callable) commands, and nested cascade sublists.
        for item in items:                     # scan nested items list
            if item == 'separator':            # string: add separator
                menu.add_separator({})
            elif type(item) == list:           # list: disabled item list
                for num in item:
                    menu.entryconfig(num, state=DISABLED)
            elif type(item[2]) != list:
                menu.add_command(label = item[0],      # command:
                                 underline = item[1],  # add command
                                 command = item[2])    # cmd=callable
            else:
                pullover = Menu(menu)
                self.addMenuItems(pullover, item[2])   # sublist:
                menu.add_cascade(label = item[0],      # make submenu
                                 underline = item[1],  # add cascade
                                 menu = pullover)

    def makeToolBar(self):
        """
        make button bar at bottom, if any
        expand=no, fill=x so same width on resize
        this could support images too: see chapter 9,
        would need prebuilt gifs or PIL for thumbnails
        """
        if self.toolBar:
            toolbar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
            toolbar.pack(side=BOTTOM, fill=X)
            for (name, action, where) in self.toolBar:
                Button(toolbar, text=name, command=action).pack(where)

    def makeWidgets(self):
        """
        make 'middle' part last, so menu/toolbar
        is always on top/bottom and clipped last;
        override this default, pack middle any side;
        for grid: grid middle part in packed frame
        """
        name = Label(self,
                     width=40, height=10,
                     relief=SUNKEN, bg='white',
                     text = self.__class__.__name__,
                     cursor = 'crosshair')
        name.pack(expand=YES, fill=BOTH, side=TOP)

    def help(self):
        "override me in subclass"
        showinfo('Help', 'Sorry, no help for ' + self.__class__.__name__)

    def start(self):
        "override me in subclass: set menu/toolbar with self"
        pass
################################################################################
# Customize for Tk 8.0 main window menu bar, instead of a frame
################################################################################
GuiMakerFrameMenu = GuiMaker           # use this for embedded component menus


class GuiMakerWindowMenu(GuiMaker):    # use this for top-level window menus
    # Variant that attaches a native Tk 8.0 window menu to the toplevel
    # (self.master) instead of packing a Frame-based menu bar.
    def makeMenuBar(self):
        menubar = Menu(self.master)
        self.master.config(menu=menubar)
        for (name, key, items) in self.menuBar:
            pulldown = Menu(menubar)
            self.addMenuItems(pulldown, items)
            menubar.add_cascade(label=name, underline=key, menu=pulldown)
        if self.helpButton:
            if sys.platform[:3] == 'win':
                menubar.add_command(label='Help', command=self.help)
            else:
                pulldown = Menu(menubar)      # Linux needs real pull down
                pulldown.add_command(label='About', command=self.help)
                menubar.add_cascade(label='Help', menu=pulldown)
################################################################################
# Self-test when file run standalone: 'python guimaker.py'
################################################################################
if __name__ == '__main__':
from guimixin import GuiMixin # mix in a help method
menuBar = [
('File', 0,
[('Open', 0, lambda:0),
('Quit', 0, sys.exit)]),
('Edit', 0,
[('Cut', 0, lambda:0),
('Paste', 0, lambda:0)]) ]
toolBar = [('Quit', sys.exit, {'side': LEFT})]
class TestAppFrameMenu(GuiMixin, GuiMakerFrameMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenu(GuiMixin, GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar
class TestAppWindowMenuBasic(GuiMakerWindowMenu):
def start(self):
self.menuBar = menuBar
self.toolBar = toolBar # guimaker help, not guimixin
root = Tk()
TestAppFrameMenu(Toplevel())
TestAppWindowMenu(Toplevel())
TestAppWindowMenuBasic(root)
root.mainloop()
|
[
"[email protected]"
] | |
01593c9ffc95662e33bc80059daecc2592dd829f
|
08e26af5604fda61846c421d739c82ea0bd17271
|
/product_account_purchase_sale/account_invoice.py
|
c25db1448d9f73d91fe3eab11331df7acb6e59cc
|
[] |
no_license
|
germanponce/nishikawa_addons
|
376342d6d45250eec85443abf4eb4f760256de85
|
765dd185272407175fbc14a8f4d702bf6e5e759d
|
refs/heads/master
| 2021-01-25T04:09:07.391100 | 2014-07-14T14:57:21 | 2014-07-14T14:57:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,410 |
py
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 moylop260 - http://www.hesatecnica.com.com/
# All Rights Reserved.
# info skype: german_442 email: ([email protected])
############################################################################
# Coded by: german_442 email: ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import time
from datetime import datetime, date
from tools.translate import _
from openerp import SUPERUSER_ID
class account_invoice(osv.osv):
    # Old-API OpenERP extension of account.invoice: adds the department
    # responsible for requesting the purchase.
    _name = 'account.invoice'
    _inherit ='account.invoice'

    _columns = {
        'department_id': fields.many2one('hr.department', 'Departamento', help='Define el Departamento encargado de la Solicitud de la Compra' ),
    }
    _default = {
    }

account_invoice()
class account_invoice_line(osv.osv):
    # Old-API OpenERP extension of account.invoice.line: adds a flag that is
    # raised when the chosen product requires analytic accounts.
    _inherit ='account.invoice.line'

    _columns = {
        # Set by product_id_change below from product.analytics_accounts_required.
        'analytics_accounts_required': fields.boolean('Cuentas Analiticas Requeridas') ,
    }

    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
        # Onchange handler: derives account, taxes, price, UoM and description
        # from the selected product / partner / fiscal position, then flags the
        # line when the product requires analytic accounts (the extension).
        value = {}
        if context is None:
            context = {}
        company_id = company_id if company_id != None else context.get('company_id',False)
        context = dict(context)
        # Force company so property fields resolve for the right company.
        context.update({'company_id': company_id, 'force_company': company_id})
        if not partner_id:
            raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
        if not product:
            if type in ('in_invoice', 'in_refund'):
                return {'value': {}, 'domain':{'product_uom':[]}}
            else:
                return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
        part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        fpos_obj = self.pool.get('account.fiscal.position')
        fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False

        if part.lang:
            context.update({'lang': part.lang})
        result = {}
        res = self.pool.get('product.product').browse(cr, uid, product, context=context)

        # Pick the income account for customer documents, expense otherwise,
        # falling back to the product category's account.
        if type in ('out_invoice','out_refund'):
            a = res.property_account_income.id
            if not a:
                a = res.categ_id.property_account_income_categ.id
        else:
            a = res.property_account_expense.id
            if not a:
                a = res.categ_id.property_account_expense_categ.id
        a = fpos_obj.map_account(cr, uid, fpos, a)
        if a:
            result['account_id'] = a

        # Taxes: product taxes first, else the account's default taxes,
        # mapped through the fiscal position.
        if type in ('out_invoice', 'out_refund'):
            taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
        else:
            taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
        tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)

        if type in ('in_invoice', 'in_refund'):
            result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
        else:
            result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
        result['name'] = res.partner_ref

        result['uos_id'] = uom_id or res.uom_id.id
        if res.description:
            result['name'] += '\n'+res.description

        # Restrict selectable UoM to the product UoM's category.
        domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}

        res_final = {'value':result, 'domain':domain}

        if not company_id or not currency_id:
            return res_final

        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)

        # Currency conversion when the invoice currency differs from the
        # company currency.
        if company.currency_id.id != currency.id:
            if type in ('in_invoice', 'in_refund'):
                res_final['value']['price_unit'] = res.standard_price
            new_price = res_final['value']['price_unit'] * currency.rate
            res_final['value']['price_unit'] = new_price

        # Re-express the price in the selected UoM when it differs from the
        # product's default UoM.
        if result['uos_id'] and result['uos_id'] != res.uom_id.id:
            selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
            new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
            res_final['value']['price_unit'] = new_price

        #### Validamos que el producto requiera las cuentas Analiticas
        # (Validate whether the product requires analytic accounts.)
        prod_obj = self.pool.get('product.product')
        prod_b = prod_obj.browse(cr, uid, [product], context=None)[0]
        if prod_b.analytics_accounts_required:
            res_final['value'].update({'analytics_accounts_required':True})
        return res_final

account_invoice_line()
class account_account_template(osv.osv):
    """Extend `account.account.template` so the account name is longer
    (256 chars) and translatable."""
    _name = "account.account.template"
    _inherit = "account.account.template"
    _columns = {
        # Widened from the core definition and marked translate=True so the
        # template name can be localized.
        'name': fields.char('Name', size=256, required=True, select=True, translate=True),
    }
# Legacy OpenERP registration call (required in pre-v8 module style).
account_account_template()
class account_account(osv.osv):
    """Extend `account.account` with the same widened, translatable `name`
    column as `account.account.template`, keeping the two models consistent."""
    _name = "account.account"
    _inherit = "account.account"
    _columns = {
        # Widened from the core definition and marked translate=True so the
        # account name can be localized.
        'name': fields.char('Name', size=256, required=True, select=True, translate=True),
    }
# Legacy OpenERP registration call (required in pre-v8 module style).
account_account()
|
[
"[email protected]"
] | |
30563f1f0d1d655fea8cc0dad2b55e5530bab2b8
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-cph/huaweicloudsdkcph/v1/model/list_resource_instances_request.py
|
0823fdc434041d9670e7c3631928d7a2eaaf42b5
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 4,217 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListResourceInstancesRequest:
    """Request model for the CPH ListResourceInstances API.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'resource_type': 'str',
        'body': 'ListResourceInstancesRequestBody'
    }

    attribute_map = {
        'resource_type': 'resource_type',
        'body': 'body'
    }

    def __init__(self, resource_type=None, body=None):
        """Build a ListResourceInstancesRequest.

        :param resource_type: Resource type; currently ``cph-server``
            (cloud phone server).
        :type resource_type: str
        :param body: Body of the ListResourceInstancesRequest
        :type body: :class:`huaweicloudsdkcph.v1.ListResourceInstancesRequestBody`
        """
        self._resource_type = None
        self._body = None
        self.discriminator = None
        # Always run resource_type through its setter; body is only assigned
        # when the caller actually supplied one (mirrors generated SDK models).
        self.resource_type = resource_type
        if body is not None:
            self.body = body

    @property
    def resource_type(self):
        """Resource type; currently ``cph-server`` (cloud phone server).

        :rtype: str
        """
        return self._resource_type

    @resource_type.setter
    def resource_type(self, resource_type):
        """Set the resource type (e.g. ``cph-server``)."""
        self._resource_type = resource_type

    @property
    def body(self):
        """Request body.

        :rtype: :class:`huaweicloudsdkcph.v1.ListResourceInstancesRequestBody`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Set the request body."""
        self._body = body

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(attr, value):
            # Lists and dicts are converted element-wise; nested models via
            # their own to_dict(); sensitive scalars are masked.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return "****" if attr in self.sensitive_list else value

        return {attr: _convert(attr, getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when all their attributes match."""
        return isinstance(other, ListResourceInstancesRequest) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
|
[
"[email protected]"
] | |
056124ade6036e7d9c1b4817404a25f132abcf7f
|
ecba842cc189499da2c98248e92a458dbcc0dc67
|
/apps/website/privacy/urls.py
|
59aa42a56262eec8c222c517c823f3eb3f7c6516
|
[] |
no_license
|
aquaristar/hhlearn
|
c23e94ab93221419db74409f44d8310244212190
|
ec409b7886bacb33cd3f5c3a724243a30158cd54
|
refs/heads/master
| 2023-03-10T15:46:39.740438 | 2019-11-16T19:19:02 | 2019-11-16T19:19:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 202 |
py
|
from django.conf.urls import patterns, include, url
# Route /privacy/ to the `privacy` view (legacy Django string-based view
# reference via patterns()).
urlpatterns = patterns(
    'apps.website.privacy.views',
    url(r'^privacy/$', 'privacy', name='privacy'),
)
|
[
"[email protected]"
] | |
77943a4d3e4d1148d94b9ad235dc96195e234ab2
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Datatypes/KlVerkeersregelaarVoltage.py
|
34ad4ce0c129e30a204bef55ade1b07e3f23d16f
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 |
MIT
| 2022-06-20T20:36:00 | 2021-11-28T10:28:24 |
Python
|
UTF-8
|
Python
| false | false | 1,838 |
py
|
# coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerkeersregelaarVoltage(KeuzelijstField):
    """Enumeration of the voltages used for traffic-light controllers
    (Dutch: "Keuzelijst met de voorkomende voltages gebruikt voor
    verkeersregelaars"). Generated code — extend rather than edit."""
    # Machine-readable identity of this code list; values are part of the
    # published OTL data contract and must not be changed by hand.
    naam = 'KlVerkeersregelaarVoltage'
    label = 'Verkeersregelaar voltage'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlVerkeersregelaarVoltage'
    definition = 'Keuzelijst met de voorkomende voltages gebruikt voor verkeersregelaars.'
    status = 'ingebruik'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerkeersregelaarVoltage'
    # Allowed voltage values (presumably volts — confirm against the OTL
    # spec) with their canonical concept URIs.
    options = {
        '230': KeuzelijstWaarde(invulwaarde='230',
                                label='230',
                                status='ingebruik',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/230'),
        '40': KeuzelijstWaarde(invulwaarde='40',
                               label='40',
                               status='ingebruik',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/40'),
        '42': KeuzelijstWaarde(invulwaarde='42',
                               label='42',
                               status='ingebruik',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/42')
    }

    @classmethod
    def create_dummy_data(cls):
        # Pick a random option whose status is 'ingebruik' (in use) and
        # return its invulwaarde; used by test-data generators.
        return random.choice(list(map(lambda x: x.invulwaarde,
                                      filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
|
[
"[email protected]"
] | |
6177c3e5145dab0ebb77f902ac0a558478083544
|
1ee2cd179e9eb2ec7541dec5b14ce993624181b8
|
/openrasp_iast/plugin/scanner/directory_basic.py
|
4883ffe987a3f1031767825e28eed46ad47c6f17
|
[
"Apache-2.0"
] |
permissive
|
Ze4lfRoG/openrasp-iast
|
0c8492d5c9fbe0c5b3d994f8aa703628361dd405
|
0fd4cdaae642a759cffe214de51c392b75aa828e
|
refs/heads/master
| 2020-10-01T09:05:36.359241 | 2019-12-11T12:06:43 | 2019-12-11T12:06:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,299 |
py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Copyright 2017-2019 Baidu Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from core.components.plugin import scan_plugin_base
class ScanPlugin(scan_plugin_base.ScanPluginBase):
    """Basic directory-traversal scan plugin for OpenRASP IAST."""

    # Plugin metadata shown in the IAST console (display strings are runtime
    # data and intentionally remain in Chinese).
    plugin_info = {
        "name": "directory_basic",
        "show_name": "目录遍历检测插件",
        "description": "基础目录遍历漏洞检测插件"
    }

    def mutant(self, rasp_result_ins):
        """Generate directory-traversal test vectors.

        Only runs when the original request triggered a "directory" hook.
        Yields one-element lists of mutated request_data objects, one per
        (parameter, payload) combination.
        """
        if not rasp_result_ins.has_hook_type("directory"):
            return
        # Each entry is (payload to inject, path feature expected to appear
        # in the hook if the payload reached the filesystem call).
        linux_payload_list = [
            ("../../../../../../../../../../../../../../../../../../../../etc", "/etc"),
            ("../../../../etc", "/etc"),
            ("/etc", "/etc")
        ]
        windows_payload_list = [
            ("..\\..\\..\\..\\..\\..\\..\\..\\..\\openrasp_dir", ":/openrasp_dir"),
            ("file://c:\\openrasp_dir", "c:\\openrasp_dir")
        ]
        mac_payload_list = [
            ("../../../../../../../../../../../../../../../../../../../../private/etc", "/private/etc"),
            ("../../../private/etc", "/private/etc"),
            ("/private/etc", "/private/etc")
        ]
        # Pick the payload set matching the target server's OS.
        server_os = rasp_result_ins.get_server_info()["os"]
        if server_os == "Windows":
            payload_list = windows_payload_list
        elif server_os == "Mac":
            payload_list = mac_payload_list
        else:
            payload_list = linux_payload_list
        # Collect every candidate parameter from the original request.
        request_data_ins = self.new_request_data(rasp_result_ins)
        test_params = self.mutant_helper.get_params_list(
            request_data_ins, ["get", "post", "json", "headers", "cookies"])
        for param in test_params:
            # Only mutate parameters whose (slash-stripped) value is actually
            # concatenated into the path seen by the "directory" hook.
            if not request_data_ins.is_param_concat_in_hook("directory", param["value"].rstrip("/\\")):
                continue
            payload_seq = self.gen_payload_seq()
            for payload in payload_list:
                request_data_ins = self.new_request_data(
                    rasp_result_ins, payload_seq, payload[1])
                request_data_ins.set_param(
                    param["type"], param["name"], payload[0])
                request_data_list = [request_data_ins]
                yield request_data_list

    def check(self, request_data_list):
        """Check the RASP result of one payload request.

        Returns a vulnerability description string (in Chinese, shown to the
        user) when the payload feature reached the "directory" hook,
        otherwise None.
        """
        request_data_ins = request_data_list[0]
        feature = request_data_ins.get_payload_info()["feature"]
        rasp_result_ins = request_data_ins.get_rasp_result()
        if rasp_result_ins is None:
            return None
        if self.checker.check_concat_in_hook(rasp_result_ins, "directory", feature):
            return "读取的目录可被用户输入控制"
        else:
            return None
|
[
"[email protected]"
] | |
cad384be9aede5c74227c0ca4d556d1ada8cbe9a
|
772e04b18f36fe1bffb05c16ef4eff3ba765fd13
|
/gcnvisualizer/test/test_visualizer.py
|
853d514babfbb1580b6492b0b4ad3a106332f9ae
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
clinfo/kGCN
|
3c74f552dd9d71d470a3173012b01733a1262688
|
32328d5a41e6ed7491b3edb705ff94658fc95d3f
|
refs/heads/master
| 2023-08-16T19:43:17.149381 | 2023-08-03T00:08:11 | 2023-08-03T00:08:11 | 194,075,235 | 110 | 38 |
NOASSERTION
| 2022-02-04T17:09:55 | 2019-06-27T10:31:57 |
Python
|
UTF-8
|
Python
| false | false | 503 |
py
|
import unittest
import numpy as np
import pytest
from gcnvisualizer import GCNVisualizer
def test_load_normal_pickle_file(multi_modal_profeat):
    """Every pickle from the `multi_modal_profeat` fixture must load and
    expose exactly the expected integrated-gradients keys, in order."""
    for filename in multi_modal_profeat:
        g = GCNVisualizer(filename, loglevel='ERROR')
        assert ['smiles', 'feature',
                'adjacency', 'check_scores',
                'feature_IG', 'adjacency_IG',
                'profeat_IG', 'vector_modal'] == (list(g.ig_dict.keys()))


if __name__ == "__main__":
    # BUG FIX: the unittest module has no top-level `run()` function; the
    # original `unittest.run()` raised AttributeError. Use unittest.main()
    # (the test itself is normally collected by pytest via the fixture).
    unittest.main()
|
[
"[email protected]"
] | |
182933ad1e32acc47eb2cfc12c855e4c86b85ade
|
e116a28a8e4d07bb4de1812fde957a38155eb6df
|
/polar_test2.py
|
0ee481b4a0480d2965c11c666d56aaca4fe2291a
|
[] |
no_license
|
gl-coding/EasyPyEcharts
|
5582ddf6be3158f13663778c1038767a87756216
|
f9dbe8ad7389a6e2629643c9b7af7b9dc3bfccd5
|
refs/heads/master
| 2020-09-29T20:48:46.260306 | 2019-12-10T12:52:24 | 2019-12-10T12:52:24 | 227,119,587 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
#encoding=utf-8
from pyecharts import Polar
# Weekday labels for the angular axis.
radius = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
polar = Polar("极坐标系-堆叠柱状图示例", width=1200, height=600)
# Three stacked angular-bar series sharing the same weekday axis.
for series in ([1, 2, 3, 4, 3, 5, 1],
               [2, 4, 6, 1, 2, 3, 1],
               [1, 2, 3, 4, 1, 2, 5]):
    polar.add("", series, radius_data=radius, type='barAngle', is_stack=True)
polar.show_config()
polar.render()
|
[
"[email protected]"
] | |
bae46eca925d4eec064cfa40ac5ad479eccddd16
|
6a01a9287a4c23c7f11b7c5399cfb96bbe42eba8
|
/python/scripts/get_nearest_k_features.py
|
068a702adc3492255203e07630813f3fd49b6ade
|
[
"MIT"
] |
permissive
|
xguse/gmm-to-gff-transcripts-vs-snps
|
3c25bf2752aee76174d5dab92060fe7269caf99f
|
75337135ab8ff6d840af3cfccfe6404a06777a54
|
refs/heads/master
| 2021-01-19T01:50:33.473897 | 2016-08-02T20:31:18 | 2016-08-02T20:31:18 | 54,731,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,943 |
py
|
"""For each SNP file, produce a bed representing the nearest k gene or mapped transcript features and its distance from the SNP."""
import pybedtools as pbt
import pandas as pd
k_number = snakemake.params.k_number
snp_beds = snakemake.input.snp_beds
gene_model_subtracted = snakemake.input.gene_model_subtracted
gene_models = snakemake.input.gene_models
nearest_features_beds = snakemake.output.nearest_features_beds
snps_in_features = snakemake.output.snps_in_features
headers = ["SNP_chrom",
"SNP_start",
"SNP_end",
"feature_set_name",
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"thickStart",
"thickEnd",
"itemRgb",
"blockCount",
"blockSizes",
"blockStarts",
"distance"
]
# For each SNP bed file, write (1) a bed of the k nearest gene/transcript
# features with signed distances and (2) a spreadsheet of SNPs that fall
# inside a feature (distance == 0).
for snp_bed, nearest_bed, feature_hit_file in zip(snp_beds, nearest_features_beds, snps_in_features):
    snp_bed = pbt.BedTool(snp_bed)
    gene_model_subtracted_bed = pbt.BedTool(gene_model_subtracted)
    gene_models_bed = pbt.BedTool(gene_models)
    k_nearest = snp_bed.closest([gene_model_subtracted_bed.fn,
                                 gene_models_bed.fn],
                                k=k_number,
                                names=['novel_mapped_tx', 'official_annotations'],
                                D='ref',  # Include SIGNED distances from SNP based on the ref genome
                                t='all',  # Return all members of a distance "tie"
                                mdb='each',  # Return `k_number` of neighboors for each `names`
                                )
    k_nearest.saveas(nearest_bed)
    nearest_df = pd.read_csv(nearest_bed, sep="\t", names=headers)
    # FIX: removed a bare `nearest_df` expression here — a no-op notebook
    # leftover with no effect in a script.
    # abs(distance) <= 0 keeps only distance == 0, i.e. SNPs inside a feature.
    in_features = nearest_df.query(""" abs(distance) <= 0 """)
    in_features.to_excel(feature_hit_file, index=False)
|
[
"[email protected]"
] | |
ae5f6a56b1dd9225cfb080b788cdc31d7483c321
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_mc1.py
|
fa05bea2ae6ea09939c9d07048036355cd040bf2
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,305 |
py
|
from xcp2k.inputsection import InputSection
from _avbmc1 import _avbmc1
from _move_probabilities1 import _move_probabilities1
from _move_updates1 import _move_updates1
from _max_displacements1 import _max_displacements1
class _mc1(InputSection):
    """CP2K &MC (Monte Carlo) input section: keyword slots plus subsections."""

    def __init__(self):
        InputSection.__init__(self)
        # Keyword values; None means "not set, let CP2K use its default".
        self.Nstep = None
        self.Iprint = None
        self.Nmoves = None
        self.Nswapmoves = None
        self.Lbias = None
        self.Lstop = None
        self.Ldiscrete = None
        self.Rclus = None
        self.Restart = None
        self.Nvirial = None
        self.Ensemble = None
        self.Restart_file_name = None
        self.Moves_file_name = None
        self.Molecules_file_name = None
        self.Coordinate_file_name = None
        self.Energy_file_name = None
        self.Data_file_name = None
        self.Cell_file_name = None
        self.Max_disp_file_name = None
        self.Box2_file_name = None
        self.Pressure = None
        self.Temperature = None
        self.Virial_temps = None
        self.Discrete_step = None
        self.Eta = None
        self.Randomtoskip = None
        # Nested &MC subsections.
        self.AVBMC = _avbmc1()
        self.MOVE_PROBABILITIES = _move_probabilities1()
        self.MOVE_UPDATES = _move_updates1()
        self.MAX_DISPLACEMENTS = _max_displacements1()
        self._name = "MC"
        # Maps Python attribute names to the CP2K keyword spellings used
        # when the input file is rendered.
        self._keywords = {'Lstop': 'LSTOP', 'Nswapmoves': 'NSWAPMOVES', 'Lbias': 'LBIAS', 'Box2_file_name': 'BOX2_FILE_NAME', 'Nvirial': 'NVIRIAL', 'Ensemble': 'ENSEMBLE', 'Temperature': 'TEMPERATURE', 'Data_file_name': 'DATA_FILE_NAME', 'Pressure': 'PRESSURE', 'Restart': 'RESTART', 'Cell_file_name': 'CELL_FILE_NAME', 'Moves_file_name': 'MOVES_FILE_NAME', 'Iprint': 'IPRINT', 'Rclus': 'RCLUS', 'Eta': 'ETA', 'Molecules_file_name': 'MOLECULES_FILE_NAME', 'Virial_temps': 'VIRIAL_TEMPS', 'Randomtoskip': 'RANDOMTOSKIP', 'Max_disp_file_name': 'MAX_DISP_FILE_NAME', 'Restart_file_name': 'RESTART_FILE_NAME', 'Coordinate_file_name': 'COORDINATE_FILE_NAME', 'Nmoves': 'NMOVES', 'Discrete_step': 'DISCRETE_STEP', 'Energy_file_name': 'ENERGY_FILE_NAME', 'Ldiscrete': 'LDISCRETE', 'Nstep': 'NSTEP'}
        # Maps attribute names to CP2K subsection names.
        self._subsections = {'AVBMC': 'AVBMC', 'MOVE_UPDATES': 'MOVE_UPDATES', 'MAX_DISPLACEMENTS': 'MAX_DISPLACEMENTS', 'MOVE_PROBABILITIES': 'MOVE_PROBABILITIES'}
|
[
"[email protected]"
] | |
57abb1d492da22ca8039ba1f34f17f15d3e3ae53
|
de626f1892619968efbaa22ea26079ee2269e799
|
/test/test_Master_dialog.py
|
02100573c7b18bcfb48fb78df5fb2f3d2c296df2
|
[] |
no_license
|
gerardoros/CartograficoQgisPlugin
|
7e8724cec0469d0494090b3557e9d4e967935121
|
844fa1052f435478e2e946099d7dbd6b1b97c311
|
refs/heads/master
| 2023-04-04T08:06:30.967894 | 2021-04-07T14:15:37 | 2021-04-07T14:15:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,479 |
py
|
# coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Master'
__date__ = '2018-04-27'
__copyright__ = 'Copyright 2018, Master'
import unittest
from PyQt5.QtGui import QDialogButtonBox, QDialog
from Master_dialog import MasterDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class MasterDialogTest(unittest.TestCase):
    """Test that the plugin's MasterDialog accepts/rejects via its buttons."""

    def setUp(self):
        """Runs before each test: create a fresh parentless dialog."""
        self.dialog = MasterDialog(None)

    def tearDown(self):
        """Runs after each test: drop the dialog reference."""
        self.dialog = None

    def test_dialog_ok(self):
        """Clicking OK must leave the dialog in the Accepted state."""
        button = self.dialog.button_box.button(QDialogButtonBox.Ok)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Accepted)

    def test_dialog_cancel(self):
        """Clicking Cancel must leave the dialog in the Rejected state."""
        button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Rejected)


if __name__ == "__main__":
    # Run this module directly with verbose unittest output.
    # NOTE(review): this file imports QDialogButtonBox/QDialog from
    # PyQt5.QtGui, but in PyQt5 those classes live in PyQt5.QtWidgets —
    # confirm the import actually resolves in this environment.
    suite = unittest.makeSuite(MasterDialogTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
[
"[email protected]"
] | |
20753a5eec6cbef1d2f88ca4dca223a00463326f
|
ef9dfb78938ecf500f2378a84eca8051255a836c
|
/star-travel/venv/bin/pycodestyle
|
93fb5f501bc9e2c6985163ea18cf05d0f8008eee
|
[] |
no_license
|
lesage20/djangoinitiation
|
cd11ed6a1cb16356075f4af3be8a93db31ba8c9f
|
818e3593d16c1fac5b2741605a4675d7833c18b2
|
refs/heads/master
| 2021-02-16T18:24:02.846455 | 2020-03-11T04:44:24 | 2020-03-11T04:44:24 | 245,033,422 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 247 |
#!/Users/nan/Desktop/star-travel/venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the `pycodestyle`
# entry point; it delegates straight to pycodestyle's private _main().
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" or ".exe" from argv[0] so the tool
    # reports a consistent program name across platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
|
[
"[email protected]"
] | ||
9eb258cdc6b65a9919a795440b6beb5e84713442
|
bdd40ea113fdf2f04ef7d61a096a575322928d1d
|
/Rupesh/opencv/Opencv_Python/Probabilistic_hough_line_L4_6.py
|
dd657873e7607cacae08499456287fe764d0fb17
|
[] |
no_license
|
rupesh7399/rupesh
|
3eebf924d33790c29636ad59433e10444b74bc2f
|
9b746acf37ab357c147cdada1de5458c5fc64f53
|
refs/heads/master
| 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 666 |
py
|
import cv2
import numpy as np
# Load the puzzle image, convert to grayscale, and extract Canny edges.
image = cv2.imread('images/soduku.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 170, apertureSize=3)
# Probabilistic Hough transform with the same rho/theta accuracies as the
# standard transform, a vote threshold of 200, a minimum line length of
# 5 px and a maximum gap of 10 px between collinear segments.
# BUG FIX: the original passed 5 and 10 positionally, where they bound to
# the `lines` output placeholder and `minLineLength` respectively; they
# must be keyword arguments to mean minLineLength/maxLineGap as the
# comment intended.
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=5, maxLineGap=10)
print(lines.shape)
# Draw every detected segment in green on the original image.
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), 3)
cv2.imshow('Probabilistic Hough Lines', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
700d87cb74a2cd087f84aba3b9e72437ec8f0300
|
8dbb7d4a57a29550568ea9b005ade6542bb546fd
|
/baidu-spider/setting.py
|
6b7cf57e85f3e0f1b010ad7af1e0b45f29d2f094
|
[] |
no_license
|
kalicc/feapder_project
|
96e4b0d575d61967fff445d62267abe0d1b0d7af
|
19820eb7da8f9cec32a11bdcfc47232917743896
|
refs/heads/master
| 2023-08-05T00:44:36.822548 | 2021-09-24T10:02:53 | 2021-09-24T10:02:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,628 |
py
|
# -*- coding: utf-8 -*-
"""爬虫配置文件"""
# import os
# import sys
#
# MYSQL — connection settings consumed by feapder's default MysqlPipeline.
MYSQL_IP = "localhost"        # database host
MYSQL_PORT = 3306             # database port
MYSQL_DB = "feapder"          # schema name
MYSQL_USER_NAME = "feapder"
# NOTE(review): credentials are committed in source control — consider
# loading them from environment variables (as the commented-out template
# below does for MongoDB/Redis).
MYSQL_USER_PASS = "feapder123"
#
# # MONGODB
# MONGO_IP = os.getenv("MONGO_IP", "localhost")
# MONGO_PORT = int(os.getenv("MONGO_PORT", 27017))
# MONGO_DB = os.getenv("MONGO_DB")
# MONGO_USER_NAME = os.getenv("MONGO_USER_NAME")
# MONGO_USER_PASS = os.getenv("MONGO_USER_PASS")
#
# REDIS
# ip:port 多个可写为列表或者逗号隔开 如 ip1:port1,ip2:port2 或 ["ip1:port1", "ip2:port2"]
REDISDB_IP_PORTS = "localhost:6379"
# REDISDB_USER_PASS = os.getenv("REDISDB_USER_PASS")
# # 默认 0 到 15 共16个数据库
# REDISDB_DB = int(os.getenv("REDISDB_DB", 0))
# # 适用于redis哨兵模式
# REDISDB_SERVICE_NAME = os.getenv("REDISDB_SERVICE_NAME")
#
# # 数据入库的pipeline,可自定义,默认MysqlPipeline
# ITEM_PIPELINES = [
# "feapder.pipelines.mysql_pipeline.MysqlPipeline",
# # "feapder.pipelines.mongo_pipeline.MongoPipeline",
# ]
#
# # 爬虫相关
# # COLLECTOR
# COLLECTOR_SLEEP_TIME = 1 # 从任务队列中获取任务到内存队列的间隔
# COLLECTOR_TASK_COUNT = 10 # 每次获取任务数量
#
# # SPIDER
# SPIDER_THREAD_COUNT = 1 # 爬虫并发数
# SPIDER_SLEEP_TIME = 0 # 下载时间间隔 单位秒。 支持随机 如 SPIDER_SLEEP_TIME = [2, 5] 则间隔为 2~5秒之间的随机数,包含2和5
# SPIDER_TASK_COUNT = 1 # 每个parser从内存队列中获取任务的数量
# SPIDER_MAX_RETRY_TIMES = 100 # 每个请求最大重试次数
# AUTO_STOP_WHEN_SPIDER_DONE = True # 爬虫是否自动结束
#
# # 浏览器渲染
# WEBDRIVER = dict(
# pool_size=1, # 浏览器的数量
# load_images=True, # 是否加载图片
# user_agent=None, # 字符串 或 无参函数,返回值为user_agent
# proxy=None, # xxx.xxx.xxx.xxx:xxxx 或 无参函数,返回值为代理地址
# headless=False, # 是否为无头浏览器
# driver_type="CHROME", # CHROME、PHANTOMJS、FIREFOX
# timeout=30, # 请求超时时间
# window_size=(1024, 800), # 窗口大小
# executable_path=None, # 浏览器路径,默认为默认路径
# render_time=0, # 渲染时长,即打开网页等待指定时间后再获取源码
# custom_argument=["--ignore-certificate-errors"], # 自定义浏览器渲染参数
# )
#
# # 爬虫启动时,重新抓取失败的requests
# RETRY_FAILED_REQUESTS = False
# # 保存失败的request
# SAVE_FAILED_REQUEST = True
# # request防丢机制。(指定的REQUEST_LOST_TIMEOUT时间内request还没做完,会重新下发 重做)
# REQUEST_LOST_TIMEOUT = 600 # 10分钟
# # request网络请求超时时间
# REQUEST_TIMEOUT = 22 # 等待服务器响应的超时时间,浮点数,或(connect timeout, read timeout)元组
#
# # 下载缓存 利用redis缓存,但由于内存大小限制,所以建议仅供开发调试代码时使用,防止每次debug都需要网络请求
# RESPONSE_CACHED_ENABLE = False # 是否启用下载缓存 成本高的数据或容易变需求的数据,建议设置为True
# RESPONSE_CACHED_EXPIRE_TIME = 3600 # 缓存时间 秒
# RESPONSE_CACHED_USED = False # 是否使用缓存 补采数据时可设置为True
#
# # 设置代理
# PROXY_EXTRACT_API = None # 代理提取API ,返回的代理分割符为\r\n
# PROXY_ENABLE = True
#
# # 随机headers
# RANDOM_HEADERS = True
# # UserAgent类型 支持 'chrome', 'opera', 'firefox', 'internetexplorer', 'safari',若不指定则随机类型
# USER_AGENT_TYPE = "chrome"
# # 默认使用的浏览器头 RANDOM_HEADERS=True时不生效
# DEFAULT_USERAGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
# # requests 使用session
# USE_SESSION = False
#
# # 去重
# ITEM_FILTER_ENABLE = False # item 去重
# REQUEST_FILTER_ENABLE = False # request 去重
#
# # 报警 支持钉钉、企业微信、邮件
# # 钉钉报警
# DINGDING_WARNING_URL = "" # 钉钉机器人api
# DINGDING_WARNING_PHONE = "" # 报警人 支持列表,可指定多个
# # 邮件报警
# EMAIL_SENDER = "" # 发件人
# EMAIL_PASSWORD = "" # 授权码
# EMAIL_RECEIVER = "" # 收件人 支持列表,可指定多个
# EMAIL_SMTPSERVER = "smtp.163.com" # 邮件服务器 默认为163邮箱
# # 企业微信报警
# WECHAT_WARNING_URL = "" # 企业微信机器人api
# WECHAT_WARNING_PHONE = "" # 报警人 将会在群内@此人, 支持列表,可指定多人
# WECHAT_WARNING_ALL = False # 是否提示所有人, 默认为False
# # 时间间隔
# WARNING_INTERVAL = 3600 # 相同报警的报警时间间隔,防止刷屏; 0表示不去重
# WARNING_LEVEL = "DEBUG" # 报警级别, DEBUG / ERROR
# WARNING_FAILED_COUNT = 1000 # 任务失败数 超过WARNING_FAILED_COUNT则报警
#
# LOG_NAME = os.path.basename(os.getcwd())
# LOG_PATH = "log/%s.log" % LOG_NAME # log存储路径
# LOG_LEVEL = "DEBUG"
# LOG_COLOR = True # 是否带有颜色
# LOG_IS_WRITE_TO_CONSOLE = True # 是否打印到控制台
# LOG_IS_WRITE_TO_FILE = False # 是否写文件
# LOG_MODE = "w" # 写文件的模式
# LOG_MAX_BYTES = 10 * 1024 * 1024 # 每个日志文件的最大字节数
# LOG_BACKUP_COUNT = 20 # 日志文件保留数量
# LOG_ENCODING = "utf8" # 日志文件编码
# OTHERS_LOG_LEVAL = "ERROR" # 第三方库的log等级
#
# # 切换工作路径为当前项目路径
# project_path = os.path.abspath(os.path.dirname(__file__))
# os.chdir(project_path) # 切换工作路经
# sys.path.insert(0, project_path)
# print('当前工作路径为 ' + os.getcwd())
|
[
"[email protected]"
] | |
dbbdd80325b36ade3a92fc51452029f8d9e7b58e
|
d4ca03693383a5bc20bcdf7b7d552bba1d5467ff
|
/prepare_3comp.py
|
c3f7453dccf62501b6eac3430daf838f8eb00c62
|
[] |
no_license
|
samhaug/beamform_code
|
abaa1c723fec6143523b4bf8c05e7daa2655584d
|
3bb201356056c1ee91a141b057ff7c666171de2a
|
refs/heads/master
| 2023-01-19T22:15:02.272484 | 2020-11-24T23:05:47 | 2020-11-24T23:05:47 | 289,100,877 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,717 |
py
|
import obspy
from os.path import isdir
from subprocess import call
from obspy.taup import TauPyModel
import numpy as np
from matplotlib import pyplot as plt
model = TauPyModel(model='prem')
if not isdir("./z_comp"):
call("mkdir ./z_comp",shell=True)
if not isdir("./e_comp"):
call("mkdir ./e_comp",shell=True)
if not isdir("./n_comp"):
call("mkdir ./n_comp",shell=True)
print('reading')
z = obspy.read('data/*BHZ*')
n = obspy.read('data/*BHN*')
e = obspy.read('data/*BHE*')
print('read')
z_l = []
n_l = []
e_l = []
all_l = []
z.interpolate(6)
n.interpolate(6)
e.interpolate(6)
#z.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#n.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#e.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
z.detrend()
n.detrend()
e.detrend()
# Trim every trace to numsamp samples; remove traces that are still shorter.
numsamp = 3882


def _trim_to_length(stream, nsamp):
    """Truncate each trace in `stream` to `nsamp` samples in place, then
    remove any trace that is still shorter (i.e. started with fewer than
    `nsamp` samples). Replaces three copy-pasted per-component loops."""
    short_ids = []
    for tr in stream:
        tr.data = tr.data[0:nsamp]
        if len(tr.data) != nsamp:
            short_ids.append((tr.stats.network, tr.stats.station, tr.stats.location))
    for netw, stat, loc in short_ids:
        stream.remove(stream.select(station=stat, network=netw, location=loc)[0])


_trim_to_length(z, numsamp)
_trim_to_length(n, numsamp)
_trim_to_length(e, numsamp)
#Remove duplicates
for tr in z:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in z_l:
z_l.append(name)
else:
z.remove(tr)
for tr in n:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in n_l:
n_l.append(name)
else:
n.remove(tr)
for tr in e:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in e_l:
e_l.append(name)
else:
e.remove(tr)
z_l = []
n_l = []
e_l = []
all_l = []
#Make list of each trace
for tr in z:
z_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
for tr in n:
n_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
for tr in e:
e_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
#Remove traces not common to all three components
for i in (set(all_l)-set(z_l)):
try:
for tr in n.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
n.remove(tr)
except:
pass
try:
for tr in e.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
e.remove(tr)
except:
continue
for i in (set(all_l)-set(n_l)):
try:
for tr in z.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
z.remove(tr)
except:
pass
try:
for tr in e.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
e.remove(tr)
except:
pass
for i in (set(all_l)-set(e_l)):
try:
for tr in n.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
n.remove(tr)
except:
pass
try:
for tr in z.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
z.remove(tr)
except:
pass
z.sort(['network','station','location'])
n.sort(['network','station','location'])
e.sort(['network','station','location'])
#print("Aligning on P")
#for idx,tr in enumerate(z):
# gcarc = tr.stats.sac['gcarc']
# if tr.stats.sac['evdp'] > 1000:
# tr.stats.sac['evdp'] *= 1/1000.
# h = tr.stats.sac['evdp']
# t = model.get_travel_times(source_depth_in_km=h,
# distance_in_degree=gcarc,
# phase_list=['ttp'])[0].time
# s = tr.stats.sampling_rate
# w = tr.data[int((t-20)*s):int((t+20)*s)]
# l = int(len(w)/2.)
# p1 = np.argmax(np.abs(w))
# z[idx].data = np.roll(z[idx].data,l-p1)
# e[idx].data = np.roll(e[idx].data,l-p1)
# n[idx].data = np.roll(n[idx].data,l-p1)
#z.differentiate()
#n.differentiate()
#e.differentiate()
for tr in z:
tr.write('z_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
for tr in n:
if tr.stats.sac['evdp'] > 1000:
tr.stats.sac['evdp'] *= 1/1000.
tr.write('n_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
for tr in e:
if tr.stats.sac['evdp'] > 1000:
tr.stats.sac['evdp'] *= 1/1000.
tr.write('e_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
|
[
"[email protected]"
] | |
7de8fb97bfda1fbea1f83d3bc24dd88497e0a7b5
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_Seasonal_Minute_LSTM.py
|
aac22081d0e2dc667f76605a2ce93909efa6bbb6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 163 |
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['ConstantTrend'] , ['Seasonal_Minute'] , ['LSTM'] );
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.