blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–616) | content_id (string, len 40) | detected_licenses (list, len 0–112) | license_type (2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3–10.2M) | extension (188 classes) | content (string, len 3–10.2M) | authors (list, len 1) | author_id (string, len 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b5706ba5278a195707b47a5d60c00805fc6e7e5
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616204038.py
|
910525208285c836bf6cb5ec61a6cd7e8d587d35
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
# IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each part is a digit between 0 and 255
def valid(ip):
    address = ip.split(".")
    if len(address) != 4:
        return False
    for a in address:
        # reject non-digits, out-of-range values, and leading zeros
        if not a.isdigit() or int(a) > 255:
            return False
        if len(a) > 1 and a[0] == "0":
            return False
    return True
print(valid("172.16.254.02"))  # False: "02" has a leading zero
|
[
"[email protected]"
] | |
08087af6a1ab27fa92e29b9cc8a336473af168f4
|
8ccc03e848cd180186fec52179740a6007875c32
|
/Control/main.py
|
239ad03da3601da79bf3dd294a73c23e148fc0f1
|
[] |
no_license
|
ManiacalLabs/Laz-A-Sketch
|
013166f06ccd63a7237608eec83dbc0a789ebe0f
|
2901aecc09b1e4f34e982b59016269ed69950e1d
|
refs/heads/master
| 2020-07-05T09:15:23.999032 | 2019-09-21T18:31:14 | 2019-09-21T18:31:14 | 202,604,166 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,869 |
py
|
from com import Encoder
from grbl import grbl
from time import sleep
# constrain to specific range
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
# round to a specific increment, such as 0.25
def inc_round(v, inc):
return round(v/inc)*inc
class Control(object):
def __init__(self, g, enc, spm, inc, power, speed, init, size):
self.grbl = g
self.enc = enc
self.spm = spm
self.inc = inc
self.power = power
self.speed = speed
self.init = init
self._x, self._y = 0,0 # internal use "high res" values
self.x, self.y = 0,0 # values constrained to specific increments
self.grbl.unlock()
self.cfg = self.grbl.get_config()
# self.max_x, self.max_y = 130,130
if size:
self.max_x, self.max_y = size[0], size[1]
else:
self.max_x = self.cfg['$130']
self.max_y = self.cfg['$131']
self.grbl.send(self.init)
self.set_speed(self.speed)
self.set_power(self.power)
def home(self):
print('Homing...')
self.x, self.y, _ = self.grbl.home()
self._x, self._y = self.x, self.y
print('Homing complete')
def set_speed(self, speed):
self.speed = speed
self.grbl.send('F{}'.format(self.speed))
def set_power(self, power):
self.power = power
self.grbl.send('S{}'.format(1000*self.power))
def check(self):
# store previous values
lx, ly = self.x, self.y
# read encoder deltas
dx, dy = self.enc.read()
# update and constrain internal values
self._x += (dx / self.spm)
self._y += (dy / self.spm)
self._x = clamp(self._x, 0, self.max_x)
self._y = clamp(self._y, 0, self.max_y)
# round to configured increment
self.x = inc_round(self._x, self.inc)
self.y = inc_round(self._y, self.inc)
return (self.x != lx or self.y != ly)
def move(self):
cmd = 'G1 X{0:.3f} Y{1:.3f}'.format(self.x, self.y)
self.grbl.send(cmd)
MACHINE_CFG = {
"size": None, # X,Y dimensions in mm, None to autodetect
"spm": 100, # encoder steps per mm movement
"inc": 0.01, # constrain move values to this increment
"power": 0.05, # default power level (0.0 - 1.0)
"speed": 5000, # default movement speed,
"init": "G90 G21 G54 M4" # startup gcode
}
def main():
g = grbl()
enc = Encoder()
con = Control(g, enc, **MACHINE_CFG)
con.home()
while True:
if con.check():
con.move()
# print('{0:.2f},{1:.2f}'.format(con.x, con.y))
# sleep(0.05)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
7455209477479b379ca339fc7009bf72424ba4ab
|
52c8f780d1b2d6086b0c9e70e4ddfbcba8a8d97a
|
/sam-app/tests/unit/test_acc_FanManager_with_driver.py
|
e8aa91dad46f301dcac03dc84b2b99e9da9cc77b
|
[] |
no_license
|
stevezieglerva/lutils
|
75089dec093e3b0377fe6e333844daa6a923acbd
|
d28fb8269e9dbc10a01b48761b4c706145d45cd1
|
refs/heads/master
| 2023-07-27T18:17:08.026448 | 2021-09-17T15:33:50 | 2021-09-17T15:33:50 | 276,093,010 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,593 |
py
|
import inspect
import json
import os
import sys
import boto3
from moto import mock_dynamodb2
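# make the common_layer_hex code and the parent test directory importable when this file is run directly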
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir) + "/common_layer_hex/python"
sys.path.insert(0, parentdir)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
print("Updated path:")
print(json.dumps(sys.path, indent=3))
import unittest
from unittest import mock
from common_layer_hex.python.domain.FanManager import *
from FanManagerTestDriver import *
from common_layer_hex.python.infrastructure.repository.InMemoryRepository import (
InMemoryRepository,
)
from common_layer_hex.python.infrastructure.notifications.TestNotifier import (
TestNotifier,
)
from common_layer_hex.python.domain.ProcessDTO import *
from common_layer_hex.python.domain.TaskDTO import *
from common_layer_hex.python.infrastructure.repository.DynamoDB import DynamoDB
from common_layer_hex.python.domain.FanEventDTO import FanEventDTO
class FanManagerDriverUnitTests(unittest.TestCase):
@mock_dynamodb2
def test_start_process__given_valid_inputs__then_process_and_task_in_repo(self):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
# Act
results = driver.when_start_process("fan manager test", tasks)
# Assert
driver.then_process_in_repo(results.updated_process.process_id)
driver.then_count_of_tasks_in_status(results.updated_process, "fan_out", 2)
driver.then_tasks_linked_to_process(
results.updated_process, results.updated_tasks[0]
)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_STARTED
)
self.assertEqual(results.updated_process.information, "special info")
@mock_dynamodb2
def test_fan_out__given_newly_created_tasks__then_tasks_status_changed_and_notifications_sent(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
# Act
results = driver.when_fan_out(results.updated_tasks)
print(f"\n\n{results}")
# Assert
driver.then_count_of_tasks_in_status(process, "created", 2)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_CREATED
)
@mock_dynamodb2
def test_complete_task__given_some_tasks_open__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
results = driver.when_fan_out(results.updated_tasks)
second_updated_task = results.updated_tasks[1]
# Act
results = driver.when_complete_task(second_updated_task)
print(results)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
1,
)
driver.then_progress_is(results.updated_process.progress, 0.5)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_COMPLETED
)
self.assertEqual(results.updated_process.information, "special info")
@mock_dynamodb2
def test_complete_task__given_all_tasks_completed__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
# Act
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
print(results)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_TASK_COMPLETED
)
@mock_dynamodb2
def test_complete_process__given_all_tasks_completed__then_tasks_status_changed_and_process_progress_set(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
# Act
results = driver.when_complete_process_if_needed(results.updated_process)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_COMPLETED
)
@mock_dynamodb2
    def test_complete_process__given_some_tasks_open__then_progress_not_1_and_process_complete_event_not_sent(
self,
):
# Arrange
driver = FanManagerTestDriver()
tasks = driver.create_task_array(
[
{"task_name": "task 01", "message": {"action": "go"}},
{"task_name": "task 02", "message": {"action": "save"}},
]
)
results = driver.when_start_process("fan manager test", tasks)
process = results.updated_process
saved_tasks = results.updated_tasks
results = driver.when_fan_out(saved_tasks)
results = driver.when_complete_task(saved_tasks[0])
results = driver.when_complete_task(saved_tasks[1])
# Act
results = driver.when_complete_process_if_needed(process)
# Assert
driver.then_count_of_tasks_in_status(
results.updated_process,
"completed",
2,
)
driver.then_progress_is(results.updated_process.progress, 1)
driver.then_event_created_for(
results.event_notifications[0], EVENT_PROCESS_COMPLETED
)
|
[
"[email protected]"
] | |
79d4842cf8368b7277beb304cd01456d2b9ee061
|
f6c6e0ebc18b7b1a28c23367f62c960e86194c88
|
/fileIO/hdf5/nexus_tools.py
|
421d6569c4932aaca6baeea4dd1b1d2bf5521445
|
[] |
no_license
|
TheGrim1/python_work
|
9316d6fbb71a4be9bd901f104e939949dfd91174
|
5b34277aed4c06b62276644160e0aa97a4260233
|
refs/heads/master
| 2021-01-11T13:54:54.366575 | 2019-03-12T12:38:39 | 2019-03-12T12:38:39 | 94,876,671 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,053 |
py
|
import sys, os
import h5py
import numpy as np
from nexusformat.nexus import *
import datetime
# local import for testing:
sys.path.append(os.path.abspath("/data/id13/inhouse2/AJ/skript"))
from fileIO.hdf5.open_h5 import open_h5
import time
def createlink(f,dest,linkdir,linkname,soft=True):
"""
Args:
f (h5py.File|str): hdf5 file
dest (str): destination for the link
linkdir (str): link directory
linkname (str): link name
adapted from spectrocrunch (woutdenolf)
"""
bclose = False
if isinstance(f,h5py.File) or isinstance(f,h5py.Group):
hdf5FileObject = f
elif isinstance(f,str):
hdf5FileObject = h5py.File(f)
bclose = True
else:
raise ValueError("The hdf5 file must be either a string or an hdf5 file object.")
if dest in hdf5FileObject:
if soft:
# Remove the link if it exists
if linkdir in hdf5FileObject:
if linkname in hdf5FileObject[linkdir]:
hdf5FileObject[linkdir].id.unlink(linkname)
# Create the link
hdf5FileObject[linkdir][linkname] = h5py.SoftLink(dest)
else:
b = True
if linkdir in hdf5FileObject:
if linkname in hdf5FileObject[linkdir]:
hdf5FileObject[linkdir][linkname].path = f[dest]
b = False
if b:
hdf5FileObject[linkdir][linkname] = f[dest]
if bclose:
hdf5FileObject.close()
def timestamp(nx_f = None):
'''
    timestamps the passed nexus file, returns 1 if successful, -1 otherwise
'''
if type(nx_f) == h5py._hl.files.File or type(nx_f) == NXroot:
timestamp = "T".join(str(datetime.datetime.now()).split())
if 'file_time' in list(nx_f.attrs.keys()):
nx_f.attrs['file_update_time'] = timestamp
else:
nx_f.attrs['file_time'] = timestamp
test = 1
else:
test = -1
return test
def find_dataset_path(nx_g, dataset_name):
'''
returns the path to dataset_name within the groups in nx_g.
kind of like to find --maxdepth=1
'''
dataset_path = 'did not find a valid path'
for key in list(nx_g.keys()):
for dataset in nx_g[key]:
if dataset.name == dataset_name:
                dataset_path = key + '/' + dataset_name
return dataset_path
def id13_default_units(name):
angles = ['Theta',
'Rot1',
'Rot2',
'Rot3']
piezo = ['nnp1',
'nnp2',
'nnp3']
time = ['time',
'exp']
meter = ['PixelSize1',
'PixelSize2',
'Distance',
'Poni1',
'Poni2',
'Wavelength']
if name in angles:
units = 'degrees'
elif name in meter:
units = 'm'
elif name in piezo:
units = 'um'
elif name in time:
units = 's'
else:
units = 'mm'
return units
|
[
"[email protected]"
] | |
1f01193d2bfd5c64c2531378677587ae85d761bf
|
8aa1b94626402c0c614128d6061edb771dad05cf
|
/qt/qt03/qt04_main.py
|
d39ff9264bb6cdfeb174a973c92f7e9ebe2cd09e
|
[] |
no_license
|
netfj/Project_Stu02
|
31e76c1b656ee74c54cae2185821dec7ccf50401
|
afc1b26b7c586fd6979ab574c7d357a6b9ef4d29
|
refs/heads/master
| 2023-03-13T22:24:40.364167 | 2021-02-23T09:53:31 | 2021-02-23T09:53:31 | 341,506,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 896 |
py
|
#coding:utf-8
"""
@info:
@author:NetFj @software:PyCharm @file:qt04_main.py @time:2018/11/19.19:10
"""
import sys
from PyQt5 import QtWidgets,QtCore,QtGui
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from qt04_lineEdit import Ui_Form
class myForm(QWidget,Ui_Form):
def __init__(self):
super().__init__()
self.setupUi(self)
action = QAction(self)
action.setIcon(QIcon('Warning.ico'))
action.triggered.connect(self.Check)
self.lineEdit.addAction(action, QLineEdit.TrailingPosition)
regx = QRegExp("^[a-zA-Z][0-9A-Za-z]{14}$")
validator = QRegExpValidator(regx, self.lineEdit_4)
self.lineEdit_4.setValidator(validator)
def Check(self):
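        # '你输入了' means "You entered"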
print('你输入了:',self.lineEdit.text())
app = QApplication(sys.argv)
myshow = myForm()
myshow.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
e0254a9e2cd5d791b1204e4fbb4bb1b67eaa4c7d
|
f88f900c0384f6da82eeb749371ad44115527700
|
/course-book/09-matching/0908-brisk.py
|
cd57fd239de27d3da4c9d0d75da5fa6d0aa2061a
|
[] |
no_license
|
aaron-kr/learning-opencv
|
eff382e8f0c822400f765451d57b192a63cd1b74
|
158239f0140569aec519fc1fbf255c54ef2567d2
|
refs/heads/main
| 2023-08-21T11:02:49.775425 | 2021-10-27T00:04:01 | 2021-10-27T00:04:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,671 |
py
|
# 0908.py
# BRISK = Binary Robust Invariant Scalable Keypoints
import cv2
import numpy as np
#1
def distance(f1,f2):
x1,y1 = f1.pt
x2,y2 = f2.pt
return np.sqrt((x2-x1) ** 2 + (y2-y1) ** 2)
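# keep only keypoints that are at least distE apart, preferring ones earlier in the list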
def filteringByDistance(kp, distE = 0.5):
size = len(kp)
    mask = np.ones(size, dtype=bool) # all True
for i, f1 in enumerate(kp):
if not mask[i]:
continue
else: # True
for j, f2 in enumerate(kp):
if i == j:
continue
if distance(f1,f2) < distE:
mask[j] = False
np_kp = np.array(kp)
return list(np_kp[mask])
#2
src = cv2.imread('../../img/chessboard.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5,5), 0.0)
briskF = cv2.BRISK_create()
kp = briskF.detect(gray)
print('len(kp) = ', len(kp))
dst = cv2.drawKeypoints(gray, kp, None, color = (0,0,255))
cv2.imshow('dst', dst)
#3
kp = sorted(kp, key = lambda f: f.response, reverse = True)
filtered_kp = list(filter(lambda f: f.response > 50, kp))
filtered_kp = filteringByDistance(kp, 10)
print('len(filtered_kp) = ', len(filtered_kp))
kp, des = briskF.compute(gray, filtered_kp)
print('des.shape = ', des.shape)
print('des = ', des)
#4
dst2 = cv2.drawKeypoints(gray, filtered_kp, None, color = (0,0,255))
for f in filtered_kp:
x,y = f.pt
size = f.size
rect = ((x,y), (size,size), f.angle)
box = cv2.boxPoints(rect).astype(np.int32)
cv2.polylines(dst2, [box], True, (0,255,0), 2)
cv2.circle(dst2, (round(x), round(y)), round(f.size / 2), (255,0,0), 2)
cv2.imshow('dst2', dst2)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
d9dd5624a3b479ff9480ceb782b0bcd58623699f
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5753053697277952_1/Python/Ifni/A.py
|
aec0465a73baa41598ab32da433f7e0c9aada43c
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 889 |
py
|
import math
import numpy as numpy
inp=open('A-large.in', 'r')
out=open("A-large.out", 'w')
T=int(inp.readline())
for index in range(T):
N=int(inp.readline())
temp = [int(x) for x in (inp.readline()).split()]
senate=[];
total=0;
for j in range(N):
total=total+temp[j]
senate.append((temp[j],j))
senate.sort(key=lambda tup: tup[0])
result=""
while(total!=0):
temp=senate.pop()
result=result+chr(ord('A')+temp[1]);
temp=(temp[0]-1, temp[1]); total=total-1;
if senate[0][0]>total/2:
result=result+chr(ord('A')+senate[0][1])+" ";
if senate[0][0]==1:
senate.pop(); total=total-1; N=N-1;
else:
senate[0]=(senate[0][0]-1, senate[0][1]); total=total-1;
else:
result=result+" ";
if temp[0]!=0:
senate.append(temp); senate.sort(key=lambda tup: tup[0])
else:
N=N-1
out.write('Case #{}: {}\n'.format(index+1, result))
inp.close()
out.close()
|
[
"[email protected]"
] | |
92eacd0c0e3aef83e67904d47744a212b7061059
|
d6277540df5076aad71a0d955bca11bdc8950f7e
|
/evaluation/__init__.py
|
85d8569eb1f59a6f63f98a0c11eac841f54dd006
|
[
"Apache-2.0"
] |
permissive
|
MILVLG/bottom-up-attention.pytorch
|
c9cfc43c594d7447190f70e80b5a6a06a1613895
|
4dbce869ad17117ca9f1df55bc5604cdbcd47f59
|
refs/heads/master
| 2022-05-02T14:34:23.723685 | 2022-04-07T13:37:56 | 2022-04-07T13:37:56 | 253,174,827 | 286 | 75 |
Apache-2.0
| 2022-04-07T13:37:56 | 2020-04-05T06:59:16 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 38 |
py
|
from .vg_evaluation import VGEvaluator
|
[
"[email protected]"
] | |
2d91a0f67927ad7abc1250f72dccb9e6c541f62d
|
d2e80a7f2d93e9a38f37e70e12ff564986e76ede
|
/Python-cookbook-2nd/cb2_18/cb2_18_13_exm_1.py
|
9820f9bf01fa89bddb018133edef453aab07ff29
|
[] |
no_license
|
mahavivo/Python
|
ceff3d173948df241b4a1de5249fd1c82637a765
|
42d2ade2d47917ece0759ad83153baba1119cfa1
|
refs/heads/master
| 2020-05-21T10:01:31.076383 | 2018-02-04T13:35:07 | 2018-02-04T13:35:07 | 54,322,949 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 111 |
py
|
probability = 0.333
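# farey() (defined earlier in this recipe) approximates probability as a fraction n/d with denominator <= 100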
n, d = farey(probability, 100)
print "Odds are %d : %d" % (n, d-n)
# emits: Odds are 1 : 2
|
[
"[email protected]"
] | |
bb6314173e62dd8b39229874f63a97bf93583b80
|
8a780cb47eac9da046bdb5d6917f97a086887603
|
/problems/sum_of_square_numbers/solution.py
|
6f6f83cfeba7a9d7f3b7efee88d11a9bfb0df65a
|
[] |
no_license
|
dengl11/Leetcode
|
d16315bc98842922569a5526d71b7fd0609ee9fb
|
43a5e436b6ec8950c6952554329ae0314430afea
|
refs/heads/master
| 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
from math import sqrt
class Solution(object):
def judgeSquareSum(self, c):
"""
:type c: int
:rtype: bool
"""
if c < 0: return False
i = 0
j = int(sqrt(c))
while i <= j:
curr = i*i + j *j
if curr == c: return True
if curr < c:
i += 1
else:
j -= 1
return False
|
[
"[email protected]"
] | |
bc74bc20b8ccdcdc3a3d27ba681bafa08fb5d157
|
51aeee42378b72c570ed61264e25075c20884b44
|
/platzigram_api/users/permissions/users.py
|
9dac3c49607e04a6306b2c3d568565be22e1acfc
|
[
"MIT"
] |
permissive
|
ChekeGT/Platzigram-Api
|
ee3ac0d04194e70d84b27d39585a2c2341550025
|
0ab05f1bb325b02563aead2e885194e274013150
|
refs/heads/master
| 2022-06-03T03:28:16.312898 | 2019-07-16T21:07:36 | 2019-07-16T21:07:36 | 182,161,789 | 0 | 0 |
MIT
| 2022-05-25T01:41:31 | 2019-04-18T21:48:00 |
Python
|
UTF-8
|
Python
| false | false | 522 |
py
|
"""User model related permissions."""
# Django REST Framework
from rest_framework.permissions import BasePermission
class IsAccountOwner(BasePermission):
"""Permission that allows
a user to access a view only if the requesting
user matches with the user object
"""
message = 'You are not the account owner.'
def has_object_permission(self, request, view, user):
"""Returns if the requesting user matches with the user being used by the view."""
return request.user == user
|
[
"[email protected]"
] | |
a2ee91183a6675595d74021c02f4c73e1b8d99fc
|
131caeecc070839555b95382fe9c6ea77a618dce
|
/.history/Classiles/ice_cream_truck_20210615181707.py
|
7fea1cea58b970f58a182a32eba15b64e1fabacd
|
[
"Unlicense"
] |
permissive
|
minefarmer/Coding101-OOP
|
f128e34c95f5362b3d9a53bbac3d862c3f256263
|
d5655977559e3bd1acf6a4f185a6121cc3b05ce4
|
refs/heads/main
| 2023-05-22T18:42:37.769345 | 2021-06-18T00:28:06 | 2021-06-18T00:28:06 | 376,620,545 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 74 |
py
|
class Icecream:
def __init__(self):
self.scoops = 0
|
[
"[email protected]"
] | |
4b25dd4310919c082d27c8ddf76559522a033981
|
d400c32010a414a2f536c5c0a3490c8b8e2e9d5a
|
/modules/m16e/i18n/pt_pt/widgets/zip_validator.py
|
99374e1459daa4da8474d3b5dc41aa96697bb3c9
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
CarlosCorreiaM16e/chirico_cms
|
3e521eae8f38b732497a2b808950c6a534e69d4f
|
73897cbddb230630e13f22333b9094d0a047acb3
|
refs/heads/master
| 2020-12-30T07:59:04.100330 | 2020-05-02T12:26:58 | 2020-05-02T12:26:58 | 238,917,321 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,078 |
py
|
# -*- coding: utf-8 -*-
import sys
import traceback
from gluon import current
import m16e.term as term
class is_valid_zip( object ):
def __init__( self,
allowBlank=False,
error_message=None ):
self.allowBlank = allowBlank
self.error_message = error_message
def __call__( self, value ):
return self.validate( value )
def formatter( self, value ):
return value
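    # Portuguese postal codes have the form NNNN-NNN (e.g. 1000-001); the parts are looked up in the county_zip_code table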
def get_county_zip_code( self, value ):
db = current.db
T = current.T
from m16e.db import db_tables
czc_model = db_tables.get_table_model( 'county_zip_code', db=db )
parts = value.split( '-' )
if len( parts ) < 2:
return None
p1 = parts[0].strip()
p2 = parts[1].strip()
if len( p1 ) == 4 and len( p2 ) == 3:
q_sql = (db.county_zip_code.zip_part_1 == p1)
q_sql &= (db.county_zip_code.zip_part_2 == p2)
czc = czc_model.select( q_sql ).first()
return czc
return None
def validate( self, value ):
db = current.db
T = current.T
try:
# term.printLog( 'zip: %s' % ( repr( value ) ) )
valid = False
blank = not value
if self.allowBlank and blank:
return ( value, None )
if value:
czc = self.get_county_zip_code( value )
if czc:
valid = True
# term.printLog( 'valid: %s' % ( repr( valid ) ) )
if valid:
msg = None
else:
msg = self.error_message
if not msg:
msg = T( 'must be a valid zip code (ex.: 1000-001)' )
# term.printDebug( 'msg: %s' % repr( msg ) )
return ( value, msg )
except Exception, err:
t, v, tb = sys.exc_info()
traceback.print_exception( t, v, tb )
term.printLog( 'error: %s' % ( str( err ) ) )
return ( value, self.error_message )
|
[
"[email protected]"
] | |
c0a02b8a9cfd9ac990ca398a1120393672a231dc
|
f60b0c051d8ba5088dc4246679b870f577646bb0
|
/59 Wed, 21 Mar 2012 23:58:41.py
|
3636882413236459b26b8f671227018953648249
|
[] |
no_license
|
joopeed/lp1
|
bbd11fe7749356828a16fc45703e010db5d35464
|
117bf769a048ec1dff53f779b26c9e7adec052ba
|
refs/heads/master
| 2021-01-02T22:50:08.600553 | 2014-04-03T21:15:40 | 2014-04-03T21:15:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
# Date: Wed, 21 Mar 2012 23:58:41 +0000
# Question 59
# JOAO PEDRO FERREIRA 21211940
meses = ["jan","fev","mar","abr",=
"mai","jun","jul","ago","set&q=
uot;,"out","nov","dez"]
n = map(float,raw_input().split())
m = map(float,raw_input().split())
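# print the month name and the shortfall for each month where n[i] < m[i]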
for i in range(12):
if n[i]-m[i]<0:
print meses[i], n[i]-m[i]
|
[
"[email protected]"
] | |
32fe7a8ea69ff3766fddde1db073e44f542f5944
|
9e5f89954fae8ac705d3e721d82b7b72d9fbcbaa
|
/4. ARREGLO/Diez numeros ordenados de forma ascendente.py
|
abd54618f407fda3a01e54660c8bb74a5a277bfa
|
[] |
no_license
|
Diego-18/python-algorithmic-exercises
|
dda950d0fcabd25244ce3ecfc1296d06a7701f5f
|
583907b82c3549a4bff7718d23aa3e0d7be2e4a3
|
refs/heads/main
| 2023-08-12T02:28:49.425894 | 2021-09-26T05:14:07 | 2021-09-26T05:14:07 | 410,439,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 842 |
py
|
#!/usr/bin/env python
#-*- coding: UTF8 -*-
################################
# Written by: DIEGO CHAVEZ      #
################################
# Algorithm that loads 10 numbers into a list and sorts them from smallest to largest
def fILeerI(psTexto):
lfVar=int(raw_input(psTexto))
return lfVar
def fsLeerS(psTexto):
lsVar=raw_input(psTexto)
return lsVar
def faBurbuja (paArreglo): # Sorts the list in ascending order
liTamano=len(paArreglo)
for liX in range (0,liTamano):
for liJ in range (liX, liTamano-1):
if (paArreglo[liX]>paArreglo[liJ+1]):
liAux=paArreglo[liX]
paArreglo[liX]=paArreglo[liJ+1]
paArreglo[liJ+1]=liAux
return paArreglo
laLista=[]
for liN in range (1,11):
    liNum=fILeerI("Introduzca el número: ") # prompt: "Enter the number: "
laLista.append(liNum)
LaLista=faBurbuja(laLista)
print LaLista
#UPTP S1-T1
|
[
"[email protected]"
] | |
10be9709f131c693640209830d55dfdd6e70b369
|
225469cfb5507fba6770b91e2195b65e3f5ec066
|
/bin/django-admin
|
180e615386c07b80c6f55dc0a44b19eeaa940414
|
[] |
no_license
|
Michkail/portrapit-store
|
9bc12a063f5c2810bdc85fb5daf189f14300a849
|
c55e5d9879369fe195a1d22a3956ac17d9a71904
|
refs/heads/main
| 2023-02-15T06:17:56.208851 | 2021-01-15T15:57:11 | 2021-01-15T15:57:11 | 324,973,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
#!/home/michkail/Documents/portrapit-store/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"[email protected]"
] | ||
e707ea93d81b8d0c6720ffcee498fad5390d9260
|
f2c7b1befad2e01129a899426d3006d0298aedd6
|
/apps/transactions/api/serializer.py
|
2db12993f21f85da3c2e7aa29738f439799ed847
|
[
"MIT"
] |
permissive
|
AlcindoSchleder/flask_validator
|
9821a71660699976ebf161f7e16a809fc08b58c4
|
5d4b73b755ee434daab400a4d69d05237965334e
|
refs/heads/main
| 2023-02-26T23:27:16.246461 | 2021-02-05T02:32:28 | 2021-02-05T02:32:28 | 335,714,960 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 743 |
py
|
from datetime import datetime
from rules.configure_app import ma
from apps.transactions.model import Transactions
from flask_marshmallow.fields import fields
class TransactionsSerializer(ma.SQLAlchemyAutoSchema):
id = fields.Str()
customer_id = fields.Str(required=True)
doc_id = fields.Str(required=True)
score = fields.Float(required=True, default=0.0)
income = fields.Float(required=True, default=0.0)
requested_value = fields.Float(required=True, default=0.0)
installments = fields.Integer(required=True, default=0)
status = fields.Integer(required=True, default=400)
    time = fields.DateTime(required=True, default=datetime.now)
class Meta:
model = Transactions
include_fk = True
|
[
"[email protected]"
] | |
0eae0a5d1591d3e6e319cab06ad2ebc6522a4935
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_073/ch150_2020_04_13_19_23_30_928222.py
|
b24da99c02a5d581834b8ca6ccbea288d5519394
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
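# Basel series: pi**2 / 6 = sum(1/k**2 for k >= 1), so pi is approximated by (6*s)**0.5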
def calcula_pi(n):
    i=1
    s=0
    while i<=n:
s+=6/(i**2)
i+=1
π=s**0.5
return π
|
[
"[email protected]"
] | |
5b80e84195b05148e414152bb5e7ec6b8abe981a
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4163/codes/1593_1803.py
|
0fe4f037b835ec2220a49b366c342d4819a6b0fb
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
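# weighted checksum of a 4-digit card number: multiply the digits by 5, 4, 3, 2, sum, then take mod 11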
x = int(input("numero do cartao: "))
a = x//1000
b = x//100%10
c = x//10%10
d = x%10
m = a * 5
n = b*4
o = c*3
p = d*2
s= m+n+o+p
v = (s%11)
print(v)
|
[
"[email protected]"
] | |
e951784c19687be2b1f6400544c6e6ac4ff2bc83
|
41a4eeaf62a36d7c57ad55393996787bb55ba6b7
|
/venv/lib/python3.7/site-packages/kubernetes/client/models/v1beta1_volume_attachment_status.py
|
4bf4e872c98d42a22c1bb7ae6be5edad0a426636
|
[] |
no_license
|
jicowan/group-operator
|
c7a20ff03584da9ace19489bc3d27b9fb22a066c
|
bac6e51aef0d9836679621e3ce7e55f4c1ead402
|
refs/heads/master
| 2021-07-14T11:45:30.062219 | 2019-09-26T15:26:52 | 2019-09-26T15:26:52 | 209,454,861 | 10 | 4 | null | 2021-07-01T17:23:07 | 2019-09-19T03:29:54 |
Python
|
UTF-8
|
Python
| false | false | 7,370 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1VolumeAttachmentStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'attach_error': 'V1beta1VolumeError',
'attached': 'bool',
'attachment_metadata': 'dict(str, str)',
'detach_error': 'V1beta1VolumeError'
}
attribute_map = {
'attach_error': 'attachError',
'attached': 'attached',
'attachment_metadata': 'attachmentMetadata',
'detach_error': 'detachError'
}
def __init__(self, attach_error=None, attached=None, attachment_metadata=None, detach_error=None):
"""
V1beta1VolumeAttachmentStatus - a model defined in Swagger
"""
self._attach_error = None
self._attached = None
self._attachment_metadata = None
self._detach_error = None
self.discriminator = None
if attach_error is not None:
self.attach_error = attach_error
self.attached = attached
if attachment_metadata is not None:
self.attachment_metadata = attachment_metadata
if detach_error is not None:
self.detach_error = detach_error
@property
def attach_error(self):
"""
Gets the attach_error of this V1beta1VolumeAttachmentStatus.
The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:return: The attach_error of this V1beta1VolumeAttachmentStatus.
:rtype: V1beta1VolumeError
"""
return self._attach_error
@attach_error.setter
def attach_error(self, attach_error):
"""
Sets the attach_error of this V1beta1VolumeAttachmentStatus.
The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:param attach_error: The attach_error of this V1beta1VolumeAttachmentStatus.
:type: V1beta1VolumeError
"""
self._attach_error = attach_error
@property
def attached(self):
"""
Gets the attached of this V1beta1VolumeAttachmentStatus.
Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:return: The attached of this V1beta1VolumeAttachmentStatus.
:rtype: bool
"""
return self._attached
@attached.setter
def attached(self, attached):
"""
Sets the attached of this V1beta1VolumeAttachmentStatus.
Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:param attached: The attached of this V1beta1VolumeAttachmentStatus.
:type: bool
"""
if attached is None:
raise ValueError("Invalid value for `attached`, must not be `None`")
self._attached = attached
@property
def attachment_metadata(self):
"""
Gets the attachment_metadata of this V1beta1VolumeAttachmentStatus.
Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:return: The attachment_metadata of this V1beta1VolumeAttachmentStatus.
:rtype: dict(str, str)
"""
return self._attachment_metadata
@attachment_metadata.setter
def attachment_metadata(self, attachment_metadata):
"""
Sets the attachment_metadata of this V1beta1VolumeAttachmentStatus.
Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.
:param attachment_metadata: The attachment_metadata of this V1beta1VolumeAttachmentStatus.
:type: dict(str, str)
"""
self._attachment_metadata = attachment_metadata
@property
def detach_error(self):
"""
Gets the detach_error of this V1beta1VolumeAttachmentStatus.
The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.
:return: The detach_error of this V1beta1VolumeAttachmentStatus.
:rtype: V1beta1VolumeError
"""
return self._detach_error
@detach_error.setter
def detach_error(self, detach_error):
"""
Sets the detach_error of this V1beta1VolumeAttachmentStatus.
The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.
:param detach_error: The detach_error of this V1beta1VolumeAttachmentStatus.
:type: V1beta1VolumeError
"""
self._detach_error = detach_error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1VolumeAttachmentStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
e5261b326ef90f1f8c418f8f8769e45cd8c258f4
|
9de0d6b626727e024e00832dd6b3fb8e98e4efc6
|
/tests/runners.py
|
75a05509085256e70961c1aad5e04f7a0a69242d
|
[
"BSD-2-Clause"
] |
permissive
|
vovanz/invoke
|
2e2403472397b84c80690219e7470d7b8c39a7e3
|
ba1f2742837d51f04e229a8d1f8c0d4d132833fc
|
refs/heads/master
| 2020-03-18T09:22:08.557683 | 2018-05-23T11:29:51 | 2018-05-23T11:29:51 | 134,559,507 | 0 | 0 |
BSD-2-Clause
| 2018-05-23T11:29:53 | 2018-05-23T11:27:23 |
Python
|
UTF-8
|
Python
| false | false | 53,304 |
py
|
import os
import struct
import sys
import types
from io import BytesIO
from itertools import chain, repeat
from invoke.vendor.six import StringIO, b, PY2, iteritems
from pytest import raises, skip
from pytest_relaxed import trap
from mock import patch, Mock, call
from invoke import (
Runner, Local, Context, Config, Failure, ThreadException, Responder,
WatcherError, UnexpectedExit, StreamWatcher, Result,
)
from invoke.terminals import WINDOWS
from _util import (
mock_subprocess, mock_pty, skip_if_windows, _Dummy,
_KeyboardInterruptingRunner, OhNoz, _,
)
class _RaisingWatcher(StreamWatcher):
def submit(self, stream):
raise WatcherError("meh")
def _run(*args, **kwargs):
klass = kwargs.pop('klass', _Dummy)
settings = kwargs.pop('settings', {})
context = Context(config=Config(overrides=settings))
return klass(context).run(*args, **kwargs)
def _runner(out='', err='', **kwargs):
klass = kwargs.pop('klass', _Dummy)
runner = klass(Context(config=Config(overrides=kwargs)))
if 'exits' in kwargs:
runner.returncode = Mock(return_value=kwargs.pop('exits'))
out_file = BytesIO(b(out))
err_file = BytesIO(b(err))
runner.read_proc_stdout = out_file.read
runner.read_proc_stderr = err_file.read
return runner
def _expect_platform_shell(shell):
if WINDOWS:
assert shell.endswith('cmd.exe')
else:
assert shell == '/bin/bash'
class Runner_:
# NOTE: these copies of _run and _runner form the base case of "test Runner
# subclasses via self._run/_runner helpers" functionality. See how e.g.
# Local_ uses the same approach but bakes in the dummy class used.
def _run(self, *args, **kwargs):
return _run(*args, **kwargs)
def _runner(self, *args, **kwargs):
return _runner(*args, **kwargs)
def _mock_stdin_writer(self):
"""
Return new _Dummy subclass whose write_proc_stdin() method is a mock.
"""
class MockedStdin(_Dummy):
pass
MockedStdin.write_proc_stdin = Mock()
return MockedStdin
class init:
"__init__"
def takes_a_context_instance(self):
c = Context()
assert Runner(c).context == c
def context_instance_is_required(self):
with raises(TypeError):
Runner()
class run:
def handles_invalid_kwargs_like_any_other_function(self):
try:
self._run(_, nope_noway_nohow='as if')
except TypeError as e:
assert 'got an unexpected keyword argument' in str(e)
else:
assert False, "Invalid run() kwarg didn't raise TypeError"
class warn:
def honors_config(self):
runner = self._runner(run={'warn': True}, exits=1)
# Doesn't raise Failure -> all good
runner.run(_)
def kwarg_beats_config(self):
runner = self._runner(run={'warn': False}, exits=1)
# Doesn't raise Failure -> all good
runner.run(_, warn=True)
def does_not_apply_to_watcher_errors(self):
runner = self._runner(out="stuff")
try:
watcher = _RaisingWatcher()
runner.run(_, watchers=[watcher], warn=True, hide=True)
except Failure as e:
assert isinstance(e.reason, WatcherError)
else:
assert False, "Did not raise Failure for WatcherError!"
class hide:
@trap
def honors_config(self):
runner = self._runner(out='stuff', run={'hide': True})
r = runner.run(_)
assert r.stdout == 'stuff'
assert sys.stdout.getvalue() == ''
@trap
def kwarg_beats_config(self):
runner = self._runner(out='stuff')
r = runner.run(_, hide=True)
assert r.stdout == 'stuff'
assert sys.stdout.getvalue() == ''
class pty:
def pty_defaults_to_off(self):
assert self._run(_).pty is False
def honors_config(self):
runner = self._runner(run={'pty': True})
assert runner.run(_).pty is True
def kwarg_beats_config(self):
runner = self._runner(run={'pty': False})
assert runner.run(_, pty=True).pty is True
class shell:
def defaults_to_bash_or_cmdexe_when_pty_True(self):
_expect_platform_shell(self._run(_, pty=True).shell)
def defaults_to_bash_or_cmdexe_when_pty_False(self):
_expect_platform_shell(self._run(_, pty=False).shell)
def may_be_overridden(self):
assert self._run(_, shell='/bin/zsh').shell == '/bin/zsh'
def may_be_configured(self):
runner = self._runner(run={'shell': '/bin/tcsh'})
assert runner.run(_).shell == '/bin/tcsh'
def kwarg_beats_config(self):
runner = self._runner(run={'shell': '/bin/tcsh'})
assert runner.run(_, shell='/bin/zsh').shell == '/bin/zsh'
class env:
def defaults_to_os_environ(self):
assert self._run(_).env == os.environ
def updates_when_dict_given(self):
expected = dict(os.environ, FOO='BAR')
assert self._run(_, env={'FOO': 'BAR'}).env == expected
def replaces_when_replace_env_True(self):
env = self._run(_, env={'JUST': 'ME'}, replace_env=True).env
assert env == {'JUST': 'ME'}
def config_can_be_used(self):
env = self._run(_, settings={'run': {'env': {'FOO': 'BAR'}}}).env
assert env == dict(os.environ, FOO='BAR')
def kwarg_wins_over_config(self):
settings = {'run': {'env': {'FOO': 'BAR'}}}
kwarg = {'FOO': 'NOTBAR'}
foo = self._run(_, settings=settings, env=kwarg).env['FOO']
assert foo == 'NOTBAR'
class return_value:
def return_code(self):
"""
Result has .return_code (and .exited) containing exit code int
"""
runner = self._runner(exits=17)
r = runner.run(_, warn=True)
assert r.return_code == 17
assert r.exited == 17
def ok_attr_indicates_success(self):
runner = self._runner()
assert runner.run(_).ok is True # default dummy retval is 0
def ok_attr_indicates_failure(self):
runner = self._runner(exits=1)
assert runner.run(_, warn=True).ok is False
def failed_attr_indicates_success(self):
runner = self._runner()
assert runner.run(_).failed is False # default dummy retval is 0
def failed_attr_indicates_failure(self):
runner = self._runner(exits=1)
assert runner.run(_, warn=True).failed is True
@trap
def stdout_attribute_contains_stdout(self):
runner = self._runner(out='foo')
assert runner.run(_).stdout == "foo"
assert sys.stdout.getvalue() == "foo"
@trap
def stderr_attribute_contains_stderr(self):
runner = self._runner(err='foo')
assert runner.run(_).stderr == "foo"
assert sys.stderr.getvalue() == "foo"
def whether_pty_was_used(self):
assert self._run(_).pty is False
assert self._run(_, pty=True).pty is True
def command_executed(self):
assert self._run(_).command == _
def shell_used(self):
_expect_platform_shell(self._run(_).shell)
def hide_param_exposed_and_normalized(self):
                assert self._run(_, hide=True).hide == ('stdout', 'stderr')
assert self._run(_, hide=False).hide == tuple()
assert self._run(_, hide='stderr').hide == ('stderr',)
class command_echoing:
@trap
def off_by_default(self):
self._run("my command")
assert sys.stdout.getvalue() == ""
@trap
def enabled_via_kwarg(self):
self._run("my command", echo=True)
assert "my command" in sys.stdout.getvalue()
@trap
def enabled_via_config(self):
self._run("yup", settings={'run': {'echo': True}})
assert "yup" in sys.stdout.getvalue()
@trap
def kwarg_beats_config(self):
self._run("yup", echo=True, settings={'run': {'echo': False}})
assert "yup" in sys.stdout.getvalue()
@trap
def uses_ansi_bold(self):
self._run("my command", echo=True)
# TODO: vendor & use a color module
assert sys.stdout.getvalue() == "\x1b[1;37mmy command\x1b[0m\n"
class encoding:
# NOTE: these tests just check what Runner.encoding ends up as; it's
# difficult/impossible to mock string objects themselves to see what
# .decode() is being given :(
#
# TODO: consider using truly "nonstandard"-encoded byte sequences as
# fixtures, encoded with something that isn't compatible with UTF-8
# (UTF-7 kinda is, so...) so we can assert that the decoded string is
# equal to its Unicode equivalent.
#
# Use UTF-7 as a valid encoding unlikely to be a real default derived
# from test-runner's locale.getpreferredencoding()
def defaults_to_encoding_method_result(self):
# Setup
runner = self._runner()
encoding = 'UTF-7'
runner.default_encoding = Mock(return_value=encoding)
# Execution & assertion
runner.run(_)
runner.default_encoding.assert_called_with()
assert runner.encoding == 'UTF-7'
def honors_config(self):
c = Context(Config(overrides={'run': {'encoding': 'UTF-7'}}))
runner = _Dummy(c)
runner.default_encoding = Mock(return_value='UTF-not-7')
runner.run(_)
assert runner.encoding == 'UTF-7'
def honors_kwarg(self):
skip()
def uses_locale_module_for_default_encoding(self):
# Actually testing this highly OS/env specific stuff is very
# error-prone; so we degrade to just testing expected function
# calls for now :(
with patch('invoke.runners.locale') as fake_locale:
fake_locale.getdefaultlocale.return_value = ('meh', 'UHF-8')
fake_locale.getpreferredencoding.return_value = 'FALLBACK'
expected = 'UHF-8' if (PY2 and not WINDOWS) else 'FALLBACK'
assert self._runner().default_encoding() == expected
def falls_back_to_defaultlocale_when_preferredencoding_is_None(self):
if PY2:
skip()
with patch('invoke.runners.locale') as fake_locale:
fake_locale.getdefaultlocale.return_value = (None, None)
fake_locale.getpreferredencoding.return_value = 'FALLBACK'
assert self._runner().default_encoding() == 'FALLBACK'
class output_hiding:
@trap
def _expect_hidden(self, hide, expect_out="", expect_err=""):
self._runner(out='foo', err='bar').run(_, hide=hide)
assert sys.stdout.getvalue() == expect_out
assert sys.stderr.getvalue() == expect_err
def both_hides_everything(self):
self._expect_hidden('both')
def True_hides_everything(self):
self._expect_hidden(True)
def out_only_hides_stdout(self):
self._expect_hidden('out', expect_out="", expect_err="bar")
def err_only_hides_stderr(self):
self._expect_hidden('err', expect_out="foo", expect_err="")
def accepts_stdout_alias_for_out(self):
self._expect_hidden('stdout', expect_out="", expect_err="bar")
def accepts_stderr_alias_for_err(self):
self._expect_hidden('stderr', expect_out="foo", expect_err="")
def None_hides_nothing(self):
self._expect_hidden(None, expect_out="foo", expect_err="bar")
def False_hides_nothing(self):
self._expect_hidden(False, expect_out="foo", expect_err="bar")
def unknown_vals_raises_ValueError(self):
with raises(ValueError):
self._run(_, hide="wat?")
def unknown_vals_mention_value_given_in_error(self):
value = "penguinmints"
try:
self._run(_, hide=value)
except ValueError as e:
msg = "Error from run(hide=xxx) did not tell user what the bad value was!" # noqa
msg += "\nException msg: {}".format(e)
assert value in str(e), msg
else:
assert False, "run() did not raise ValueError for bad hide= value" # noqa
def does_not_affect_capturing(self):
assert self._runner(out='foo').run(_, hide=True).stdout == 'foo'
@trap
def overrides_echoing(self):
self._runner().run('invisible', hide=True, echo=True)
assert 'invisible' not in sys.stdout.getvalue()
class output_stream_overrides:
@trap
def out_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
self._runner(out="sup").run(_)
assert sys.stdout.getvalue() == "sup"
@trap
def err_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
self._runner(err="sup").run(_)
assert sys.stderr.getvalue() == "sup"
@trap
def out_can_be_overridden(self):
"out_stream can be overridden"
out = StringIO()
self._runner(out="sup").run(_, out_stream=out)
assert out.getvalue() == "sup"
assert sys.stdout.getvalue() == ""
@trap
def err_can_be_overridden(self):
"err_stream can be overridden"
err = StringIO()
self._runner(err="sup").run(_, err_stream=err)
assert err.getvalue() == "sup"
assert sys.stderr.getvalue() == ""
@trap
def pty_defaults_to_sys(self):
self._runner(out="sup").run(_, pty=True)
assert sys.stdout.getvalue() == "sup"
@trap
def pty_out_can_be_overridden(self):
out = StringIO()
self._runner(out="yo").run(_, pty=True, out_stream=out)
assert out.getvalue() == "yo"
assert sys.stdout.getvalue() == ""
class output_stream_handling:
# Mostly corner cases, generic behavior's covered above
def writes_and_flushes_to_stdout(self):
out = Mock(spec=StringIO)
self._runner(out="meh").run(_, out_stream=out)
out.write.assert_called_once_with("meh")
out.flush.assert_called_once_with()
def writes_and_flushes_to_stderr(self):
err = Mock(spec=StringIO)
self._runner(err="whatever").run(_, err_stream=err)
err.write.assert_called_once_with("whatever")
err.flush.assert_called_once_with()
class input_stream_handling:
# NOTE: actual autoresponder tests are elsewhere. These just test that
# stdin works normally & can be overridden.
@patch('invoke.runners.sys.stdin', StringIO("Text!"))
def defaults_to_sys_stdin(self):
# Execute w/ runner class that has a mocked stdin_writer
klass = self._mock_stdin_writer()
self._runner(klass=klass).run(_, out_stream=StringIO())
# Check that mocked writer was called w/ the data from our patched
# sys.stdin.
# NOTE: this also tests that non-fileno-bearing streams read/write
# 1 byte at a time. See farther-down test for fileno-bearing stdin
calls = list(map(lambda x: call(x), "Text!"))
klass.write_proc_stdin.assert_has_calls(calls, any_order=False)
def can_be_overridden(self):
klass = self._mock_stdin_writer()
in_stream = StringIO("Hey, listen!")
self._runner(klass=klass).run(
_,
in_stream=in_stream,
out_stream=StringIO(),
)
# stdin mirroring occurs char-by-char
calls = list(map(lambda x: call(x), "Hey, listen!"))
klass.write_proc_stdin.assert_has_calls(calls, any_order=False)
def can_be_disabled_entirely(self):
# Mock handle_stdin so we can assert it's not even called
class MockedHandleStdin(_Dummy):
pass
MockedHandleStdin.handle_stdin = Mock()
self._runner(klass=MockedHandleStdin).run(
_,
in_stream=False, # vs None or a stream
)
assert not MockedHandleStdin.handle_stdin.called
@patch('invoke.util.debug')
def exceptions_get_logged(self, mock_debug):
# Make write_proc_stdin asplode
klass = self._mock_stdin_writer()
klass.write_proc_stdin.side_effect = OhNoz("oh god why")
# Execute with some stdin to trigger that asplode (but skip the
# actual bubbled-up raising of it so we can check things out)
try:
stdin = StringIO("non-empty")
self._runner(klass=klass).run(_, in_stream=stdin)
except ThreadException:
pass
# Assert debug() was called w/ expected format
# TODO: make the debug call a method on ExceptionHandlingThread,
# then make thread class configurable somewhere in Runner, and pass
# in a customized ExceptionHandlingThread that has a Mock for that
# method?
mock_debug.assert_called_with("Encountered exception OhNoz('oh god why',) in thread for 'handle_stdin'") # noqa
class failure_handling:
def fast_failures(self):
with raises(UnexpectedExit):
self._runner(exits=1).run(_)
def non_1_return_codes_still_act_as_failure(self):
r = self._runner(exits=17).run(_, warn=True)
assert r.failed is True
class UnexpectedExit_repr:
def similar_to_just_the_result_repr(self):
try:
self._runner(exits=23).run(_)
except UnexpectedExit as e:
expected = "<UnexpectedExit: cmd='{}' exited=23>"
assert repr(e) == expected.format(_)
class UnexpectedExit_str:
def setup(self):
def lines(prefix):
return "\n".join(
"{} {}".format(prefix, x) for x in range(1, 26)
) + "\n"
self._stdout = lines('stdout')
self._stderr = lines('stderr')
@trap
def displays_command_and_exit_code_by_default(self):
try:
self._runner(
exits=23,
out=self._stdout,
err=self._stderr,
).run(_)
except UnexpectedExit as e:
expected = """Encountered a bad command exit code!
Command: '{}'
Exit code: 23
Stdout: already printed
Stderr: already printed
""".format(_)
assert str(e) == expected
else:
assert False, "Failed to raise UnexpectedExit!"
@trap
def does_not_display_stderr_when_pty_True(self):
try:
self._runner(
exits=13, out=self._stdout, err=self._stderr
).run(_, pty=True)
except UnexpectedExit as e:
expected = """Encountered a bad command exit code!
Command: '{}'
Exit code: 13
Stdout: already printed
Stderr: n/a (PTYs have no stderr)
""".format(_)
assert str(e) == expected
@trap
def pty_stderr_message_wins_over_hidden_stderr(self):
try:
self._runner(
exits=1, out=self._stdout, err=self._stderr
).run(_, pty=True, hide=True)
except UnexpectedExit as e:
r = str(e)
assert "Stderr: n/a (PTYs have no stderr)" in r
assert "Stderr: already printed" not in r
@trap
def explicit_hidden_stream_tail_display(self):
# All the permutations of what's displayed when, are in
# subsequent test, which does 'x in y' assertions; this one
# here ensures the actual format of the display (newlines, etc)
# is as desired.
try:
self._runner(
exits=77, out=self._stdout, err=self._stderr
).run(_, hide=True)
except UnexpectedExit as e:
expected = """Encountered a bad command exit code!
Command: '{}'
Exit code: 77
Stdout:
stdout 16
stdout 17
stdout 18
stdout 19
stdout 20
stdout 21
stdout 22
stdout 23
stdout 24
stdout 25
Stderr:
stderr 16
stderr 17
stderr 18
stderr 19
stderr 20
stderr 21
stderr 22
stderr 23
stderr 24
stderr 25
""".format(_)
assert str(e) == expected
@trap
def displays_tails_of_streams_only_when_hidden(self):
def oops(msg, r, hide):
return "{}! hide={}; str output:\n\n{}".format(
msg, hide, r
)
for hide, expect_out, expect_err in (
(False, False, False),
(True, True, True),
('stdout', True, False),
('stderr', False, True),
('both', True, True),
):
try:
self._runner(
exits=1, out=self._stdout, err=self._stderr
).run(_, hide=hide)
except UnexpectedExit as e:
r = str(e)
# Expect that the top of output is never displayed
err = oops("Too much stdout found", r, hide)
assert "stdout 15" not in r, err
err = oops("Too much stderr found", r, hide)
assert "stderr 15" not in r, err
# Expect to see tail of stdout if we expected it
if expect_out:
err = oops("Didn't see stdout", r, hide)
assert "stdout 16" in r, err
# Expect to see tail of stderr if we expected it
if expect_err:
err = oops("Didn't see stderr", r, hide)
assert "stderr 16" in r, err
else:
assert False, "Failed to raise UnexpectedExit!"
def _regular_error(self):
self._runner(exits=1).run(_)
def _watcher_error(self):
klass = self._mock_stdin_writer()
# Exited=None because real procs will have no useful .returncode()
# result if they're aborted partway via an exception.
runner = self._runner(klass=klass, out="stuff", exits=None)
runner.run(_, watchers=[_RaisingWatcher()], hide=True)
# TODO: may eventually turn into having Runner raise distinct Failure
# subclasses itself, at which point `reason` would probably go away.
class reason:
def is_None_for_regular_nonzero_exits(self):
try:
self._regular_error()
except Failure as e:
assert e.reason is None
else:
assert False, "Failed to raise Failure!"
def is_None_for_custom_command_exits(self):
# TODO: when we implement 'exitcodes 1 and 2 are actually OK'
skip()
def is_exception_when_WatcherError_raised_internally(self):
try:
self._watcher_error()
except Failure as e:
assert isinstance(e.reason, WatcherError)
else:
assert False, "Failed to raise Failure!"
# TODO: should these move elsewhere, eg to Result specific test file?
# TODO: *is* there a nice way to split into multiple Response and/or
# Failure subclasses? Given the split between "returned as a value when
# no problem" and "raised as/attached to an exception when problem",
# possibly not - complicates how the APIs need to be adhered to.
class wrapped_result:
def most_attrs_are_always_present(self):
attrs = (
'command', 'shell', 'env', 'stdout', 'stderr', 'pty',
)
for method in (self._regular_error, self._watcher_error):
try:
method()
except Failure as e:
for attr in attrs:
assert getattr(e.result, attr) is not None
else:
assert False, "Did not raise Failure!"
class shell_exit_failure:
def exited_is_integer(self):
try:
self._regular_error()
except Failure as e:
assert isinstance(e.result.exited, int)
else:
assert False, "Did not raise Failure!"
def ok_bool_etc_are_falsey(self):
try:
self._regular_error()
except Failure as e:
assert e.result.ok is False
assert e.result.failed is True
assert not bool(e.result)
assert not e.result
else:
assert False, "Did not raise Failure!"
def stringrep_notes_exit_status(self):
try:
self._regular_error()
except Failure as e:
assert "exited with status 1" in str(e.result)
else:
assert False, "Did not raise Failure!"
class watcher_failure:
def exited_is_None(self):
try:
self._watcher_error()
except Failure as e:
exited = e.result.exited
err = "Expected None, got {!r}".format(exited)
assert exited is None, err
def ok_and_bool_still_are_falsey(self):
try:
self._watcher_error()
except Failure as e:
assert e.result.ok is False
assert e.result.failed is True
assert not bool(e.result)
assert not e.result
else:
assert False, "Did not raise Failure!"
def stringrep_lacks_exit_status(self):
try:
self._watcher_error()
except Failure as e:
assert "exited with status" not in str(e.result)
expected = "not fully executed due to watcher error"
assert expected in str(e.result)
else:
assert False, "Did not raise Failure!"
class threading:
# NOTE: see also the more generic tests in concurrency.py
def errors_within_io_thread_body_bubble_up(self):
class Oops(_Dummy):
def handle_stdout(self, **kwargs):
raise OhNoz()
def handle_stderr(self, **kwargs):
raise OhNoz()
runner = Oops(Context())
try:
runner.run("nah")
except ThreadException as e:
# Expect two separate OhNoz objects on 'e'
assert len(e.exceptions) == 2
for tup in e.exceptions:
assert isinstance(tup.value, OhNoz)
assert isinstance(tup.traceback, types.TracebackType)
assert tup.type == OhNoz
# TODO: test the arguments part of the tuple too. It's pretty
# implementation-specific, though, so possibly not worthwhile.
else:
assert False, "Did not raise ThreadException as expected!"
def io_thread_errors_str_has_details(self):
class Oops(_Dummy):
def handle_stdout(self, **kwargs):
raise OhNoz()
runner = Oops(Context())
try:
runner.run("nah")
except ThreadException as e:
message = str(e)
# Just make sure salient bits appear present, vs e.g. default
# representation happening instead.
assert "Saw 1 exceptions within threads" in message
assert "{'kwargs': " in message
assert "Traceback (most recent call last):\n\n" in message
assert "OhNoz" in message
else:
assert False, "Did not raise ThreadException as expected!"
class watchers:
# NOTE: it's initially tempting to consider using mocks or stub
# Responder instances for many of these, but it really doesn't save
# appreciable runtime or code read/write time.
# NOTE: these strictly test interactions between
# StreamWatcher/Responder and their host Runner; Responder-only tests
# are in tests/watchers.py.
def nothing_is_written_to_stdin_by_default(self):
# NOTE: technically if some goofus ran the tests by hand and mashed
# keys while doing so...this would fail. LOL?
# NOTE: this test seems not too useful but is a) a sanity test and
# b) guards against e.g. breaking the autoresponder such that it
# responds to "" or "\n" or etc.
klass = self._mock_stdin_writer()
self._runner(klass=klass).run(_)
assert not klass.write_proc_stdin.called
def _expect_response(self, **kwargs):
"""
Execute a run() w/ ``watchers`` set from ``responses``.
        Any other ``**kwargs`` given are passed directly to ``_runner()``.
:returns: The mocked ``write_proc_stdin`` method of the runner.
"""
watchers = [
Responder(pattern=key, response=value)
for key, value in iteritems(kwargs.pop('responses'))
]
kwargs['klass'] = klass = self._mock_stdin_writer()
runner = self._runner(**kwargs)
runner.run(_, watchers=watchers, hide=True)
return klass.write_proc_stdin
def watchers_responses_get_written_to_proc_stdin(self):
self._expect_response(
out="the house was empty",
responses={'empty': 'handed'},
).assert_called_once_with("handed")
def multiple_hits_yields_multiple_responses(self):
holla = call('how high?')
self._expect_response(
out="jump, wait, jump, wait",
responses={'jump': 'how high?'},
).assert_has_calls([holla, holla])
def chunk_sizes_smaller_than_patterns_still_work_ok(self):
klass = self._mock_stdin_writer()
klass.read_chunk_size = 1 # < len('jump')
responder = Responder('jump', 'how high?')
runner = self._runner(klass=klass, out="jump, wait, jump, wait")
runner.run(_, watchers=[responder], hide=True)
holla = call('how high?')
# Responses happened, period.
klass.write_proc_stdin.assert_has_calls([holla, holla])
# And there weren't duplicates!
assert len(klass.write_proc_stdin.call_args_list) == 2
def both_out_and_err_are_scanned(self):
bye = call("goodbye")
# Would only be one 'bye' if only scanning stdout
self._expect_response(
out="hello my name is inigo",
err="hello how are you",
responses={"hello": "goodbye"},
).assert_has_calls([bye, bye])
def multiple_patterns_works_as_expected(self):
calls = [call('betty'), call('carnival')]
# Technically, I'd expect 'betty' to get called before 'carnival',
# but under Python 3 it's reliably backwards from Python 2.
# In real world situations where each prompt sits & waits for its
# response, this probably wouldn't be an issue, so using
# any_order=True for now. Thanks again Python 3.
self._expect_response(
out="beep boop I am a robot",
responses={'boop': 'betty', 'robot': 'carnival'},
).assert_has_calls(calls, any_order=True)
def multiple_patterns_across_both_streams(self):
responses = {
'boop': 'betty',
'robot': 'carnival',
'Destroy': 'your ego',
'humans': 'are awful',
}
calls = map(lambda x: call(x), responses.values())
# CANNOT assume order due to simultaneous streams.
# If we didn't say any_order=True we could get race condition fails
self._expect_response(
out="beep boop, I am a robot",
err="Destroy all humans!",
responses=responses,
).assert_has_calls(calls, any_order=True)
def honors_watchers_config_option(self):
klass = self._mock_stdin_writer()
responder = Responder("my stdout", "and my axe")
runner = self._runner(
out="this is my stdout", # yielded stdout
klass=klass, # mocked stdin writer
run={'watchers': [responder]}, # ends up as config override
)
runner.run(_, hide=True)
klass.write_proc_stdin.assert_called_once_with("and my axe")
def kwarg_overrides_config(self):
# TODO: how to handle use cases where merging, not overriding, is
# the expected/unsurprising default? probably another config-only
# (not kwarg) setting, e.g. run.merge_responses?
# TODO: now that this stuff is list, not dict, based, it should be
# easier...BUT how to handle removal of defaults from config? Maybe
# just document to be careful using the config as it won't _be_
# overridden? (Users can always explicitly set the config to be
# empty-list if they want kwargs to be the entire set of
# watchers...right?)
klass = self._mock_stdin_writer()
conf = Responder("my stdout", "and my axe")
kwarg = Responder("my stdout", "and my body spray")
runner = self._runner(
out="this is my stdout", # yielded stdout
klass=klass, # mocked stdin writer
run={'watchers': [conf]}, # ends up as config override
)
runner.run(_, hide=True, watchers=[kwarg])
klass.write_proc_stdin.assert_called_once_with("and my body spray")
class io_sleeping:
# NOTE: there's an explicit CPU-measuring test in the integration suite
# which ensures the *point* of the sleeping - avoiding CPU hogging - is
# actually functioning. These tests below just unit-test the mechanisms
# around the sleep functionality (ensuring they are visible and can be
# altered as needed).
def input_sleep_attribute_defaults_to_hundredth_of_second(self):
assert Runner(Context()).input_sleep == 0.01
@mock_subprocess()
def subclasses_can_override_input_sleep(self):
class MyRunner(_Dummy):
input_sleep = 0.007
with patch('invoke.runners.time') as mock_time:
MyRunner(Context()).run(
_,
in_stream=StringIO("foo"),
out_stream=StringIO(), # null output to not pollute tests
)
assert mock_time.sleep.call_args_list == [call(0.007)] * 3
class stdin_mirroring:
def _test_mirroring(
self,
expect_mirroring,
**kwargs
):
# Setup
fake_in = "I'm typing!"
output = Mock()
input_ = StringIO(fake_in)
input_is_pty = kwargs.pop('in_pty', None)
class MyRunner(_Dummy):
def should_echo_stdin(self, input_, output):
# Fake result of isatty() test here and only here; if we do
# this farther up, it will affect stuff trying to run
# termios & such, which is harder to mock successfully.
if input_is_pty is not None:
input_.isatty = lambda: input_is_pty
return super(MyRunner, self).should_echo_stdin(
input_, output)
# Execute basic command with given parameters
self._run(
_,
klass=MyRunner,
in_stream=input_,
out_stream=output,
**kwargs
)
# Examine mocked output stream to see if it was mirrored to
if expect_mirroring:
calls = output.write.call_args_list
assert calls == list(map(lambda x: call(x), fake_in))
assert len(output.flush.call_args_list) == len(fake_in)
# Or not mirrored to
else:
assert output.write.call_args_list == []
def when_pty_is_True_no_mirroring_occurs(self):
self._test_mirroring(
pty=True,
expect_mirroring=False,
)
def when_pty_is_False_we_write_in_stream_back_to_out_stream(self):
self._test_mirroring(
pty=False,
in_pty=True,
expect_mirroring=True,
)
def mirroring_is_skipped_when_our_input_is_not_a_tty(self):
self._test_mirroring(
in_pty=False,
expect_mirroring=False,
)
def mirroring_can_be_forced_on(self):
self._test_mirroring(
# Subprocess pty normally disables echoing
pty=True,
# But then we forcibly enable it
echo_stdin=True,
# And expect it to happen
expect_mirroring=True,
)
def mirroring_can_be_forced_off(self):
# Make subprocess pty False, stdin tty True, echo_stdin False,
# prove no mirroring
self._test_mirroring(
# Subprocess lack of pty normally enables echoing
pty=False,
# Provided the controlling terminal _is_ a tty
in_pty=True,
# But then we forcibly disable it
echo_stdin=False,
# And expect it to not happen
expect_mirroring=False,
)
def mirroring_honors_configuration(self):
self._test_mirroring(
pty=False,
in_pty=True,
settings={'run': {'echo_stdin': False}},
expect_mirroring=False,
)
@trap
@skip_if_windows
@patch('invoke.runners.sys.stdin')
@patch('invoke.terminals.fcntl.ioctl')
@patch('invoke.terminals.os')
@patch('invoke.terminals.termios')
@patch('invoke.terminals.tty')
@patch('invoke.terminals.select')
# NOTE: the no-fileno edition is handled at top of this local test
# class, in the base case test.
def reads_FIONREAD_bytes_from_stdin_when_fileno(
self, select, tty, termios, mock_os, ioctl, stdin
):
# Set stdin up as a file-like buffer which passes has_fileno
stdin.fileno.return_value = 17 # arbitrary
stdin_data = list("boo!")
def fakeread(n):
# Why is there no slice version of pop()?
data = stdin_data[:n]
del stdin_data[:n]
return ''.join(data)
stdin.read.side_effect = fakeread
# Without mocking this, we'll always get errors checking the above
# bogus fileno()
mock_os.tcgetpgrp.return_value = None
# Ensure select() only spits back stdin one time, despite there
# being multiple bytes to read (this at least partly fakes behavior
# from issue #58)
select.select.side_effect = chain(
[([stdin], [], [])],
repeat(([], [], [])),
)
# Have ioctl yield our multiple number of bytes when called with
# FIONREAD
def fake_ioctl(fd, cmd, buf):
# This works since each mocked attr will still be its own mock
# object with a distinct 'is' identity.
if cmd is termios.FIONREAD:
return struct.pack('h', len(stdin_data))
ioctl.side_effect = fake_ioctl
# Set up our runner as one w/ mocked stdin writing (simplest way to
# assert how the reads & writes are happening)
klass = self._mock_stdin_writer()
self._runner(klass=klass).run(_)
klass.write_proc_stdin.assert_called_once_with("boo!")
class character_buffered_stdin:
@skip_if_windows
@patch('invoke.terminals.tty')
@patch('invoke.terminals.termios') # stub
def setcbreak_called_on_tty_stdins(self, mock_termios, mock_tty):
self._run(_)
mock_tty.setcbreak.assert_called_with(sys.stdin)
@skip_if_windows
@patch('invoke.terminals.tty')
def setcbreak_not_called_on_non_tty_stdins(self, mock_tty):
self._run(_, in_stream=StringIO())
assert not mock_tty.setcbreak.called
@skip_if_windows
@patch('invoke.terminals.tty')
@patch('invoke.terminals.os')
def setcbreak_not_called_if_process_not_foregrounded(
self, mock_os, mock_tty,
):
# Re issue #439.
mock_os.getpgrp.return_value = 1337
mock_os.tcgetpgrp.return_value = 1338
self._run(_)
assert not mock_tty.setcbreak.called
# Sanity
mock_os.tcgetpgrp.assert_called_once_with(sys.stdin.fileno())
@skip_if_windows
@patch('invoke.terminals.tty') # stub
@patch('invoke.terminals.termios')
def tty_stdins_have_settings_restored_by_default(
self, mock_termios, mock_tty
):
sentinel = [1, 7, 3, 27]
mock_termios.tcgetattr.return_value = sentinel
self._run(_)
mock_termios.tcsetattr.assert_called_once_with(
sys.stdin, mock_termios.TCSADRAIN, sentinel
)
@skip_if_windows
@patch('invoke.terminals.tty') # stub
@patch('invoke.terminals.termios')
def tty_stdins_have_settings_restored_on_KeyboardInterrupt(
self, mock_termios, mock_tty
):
# This test is re: GH issue #303
# tcgetattr returning some arbitrary value
sentinel = [1, 7, 3, 27]
mock_termios.tcgetattr.return_value = sentinel
# Don't actually bubble up the KeyboardInterrupt...
try:
self._run(_, klass=_KeyboardInterruptingRunner)
except KeyboardInterrupt:
pass
# Did we restore settings?!
mock_termios.tcsetattr.assert_called_once_with(
sys.stdin, mock_termios.TCSADRAIN, sentinel
)
class send_interrupt:
def _run_with_mocked_interrupt(self, klass):
runner = klass(Context())
runner.send_interrupt = Mock()
try:
runner.run(_)
except:
pass
return runner
def called_on_KeyboardInterrupt(self):
runner = self._run_with_mocked_interrupt(
_KeyboardInterruptingRunner
)
assert runner.send_interrupt.called
def not_called_for_other_exceptions(self):
class _GenericExceptingRunner(_Dummy):
def wait(self):
raise Exception
runner = self._run_with_mocked_interrupt(_GenericExceptingRunner)
assert not runner.send_interrupt.called
def sends_escape_byte_sequence(self):
for pty in (True, False):
runner = _KeyboardInterruptingRunner(Context())
mock_stdin = Mock()
runner.write_proc_stdin = mock_stdin
runner.run(_, pty=pty)
mock_stdin.assert_called_once_with(u'\x03')
class stop:
def always_runs_no_matter_what(self):
class _ExceptingRunner(_Dummy):
def wait(self):
raise OhNoz()
runner = _ExceptingRunner(context=Context())
runner.stop = Mock()
try:
runner.run(_)
except OhNoz:
runner.stop.assert_called_once_with()
else:
assert False, "_ExceptingRunner did not except!"
class _FastLocal(Local):
# Neuter this for same reason as in _Dummy above
input_sleep = 0
class Local_:
def _run(self, *args, **kwargs):
return _run(*args, **dict(kwargs, klass=_FastLocal))
def _runner(self, *args, **kwargs):
return _runner(*args, **dict(kwargs, klass=_FastLocal))
class pty:
@mock_pty()
def when_pty_True_we_use_pty_fork_and_os_exec(self):
"when pty=True, we use pty.fork and os.exec*"
self._run(_, pty=True)
# @mock_pty's asserts check os/pty calls for us.
@mock_pty(insert_os=True)
def _expect_exit_check(self, exited, mock_os):
if exited:
expected_check = mock_os.WIFEXITED
expected_get = mock_os.WEXITSTATUS
unexpected_check = mock_os.WIFSIGNALED
unexpected_get = mock_os.WTERMSIG
else:
expected_check = mock_os.WIFSIGNALED
expected_get = mock_os.WTERMSIG
unexpected_check = mock_os.WIFEXITED
unexpected_get = mock_os.WEXITSTATUS
expected_check.return_value = True
unexpected_check.return_value = False
self._run(_, pty=True)
exitstatus = mock_os.waitpid.return_value[1]
expected_get.assert_called_once_with(exitstatus)
assert not unexpected_get.called
def pty_uses_WEXITSTATUS_if_WIFEXITED(self):
self._expect_exit_check(True)
def pty_uses_WTERMSIG_if_WIFSIGNALED(self):
self._expect_exit_check(False)
@mock_pty(insert_os=True)
def WTERMSIG_result_turned_negative_to_match_subprocess(self, mock_os):
mock_os.WIFEXITED.return_value = False
mock_os.WIFSIGNALED.return_value = True
mock_os.WTERMSIG.return_value = 2
assert self._run(_, pty=True, warn=True).exited == -2
@mock_pty()
def pty_is_set_to_controlling_terminal_size(self):
self._run(_, pty=True)
# @mock_pty's asserts check the TIOC[GS]WINSZ calls for us
def warning_only_fires_once(self):
# I.e. if implementation checks pty-ness >1 time, only one warning
# is emitted. This is kinda implementation-specific, but...
skip()
@patch('invoke.runners.sys')
def replaced_stdin_objects_dont_explode(self, mock_sys):
# Replace sys.stdin with an object lacking .isatty(), which
# normally causes an AttributeError unless we are being careful.
mock_sys.stdin = object()
# Test. If bug is present, this will error.
runner = Local(Context())
assert runner.should_use_pty(pty=True, fallback=True) is False
@mock_pty(trailing_error=OSError("Input/output error"))
def spurious_OSErrors_handled_gracefully(self):
# Doesn't-blow-up test.
self._run(_, pty=True)
@mock_pty(trailing_error=OSError("I/O error"))
def other_spurious_OSErrors_handled_gracefully(self):
# Doesn't-blow-up test.
self._run(_, pty=True)
@mock_pty(trailing_error=OSError("wat"))
def non_spurious_OSErrors_bubble_up(self):
try:
self._run(_, pty=True)
except ThreadException as e:
e = e.exceptions[0]
assert e.type == OSError
assert str(e.value) == "wat"
class fallback:
@mock_pty(isatty=False)
def can_be_overridden_by_kwarg(self):
self._run(_, pty=True, fallback=False)
# @mock_pty's asserts will be mad if pty-related os/pty calls
# didn't fire, so we're done.
@mock_pty(isatty=False)
def can_be_overridden_by_config(self):
self._runner(run={'fallback': False}).run(_, pty=True)
# @mock_pty's asserts will be mad if pty-related os/pty calls
# didn't fire, so we're done.
@trap
@mock_subprocess(isatty=False)
def affects_result_pty_value(self, *mocks):
assert self._run(_, pty=True).pty is False
@mock_pty(isatty=False)
def overridden_fallback_affects_result_pty_value(self):
assert self._run(_, pty=True, fallback=False).pty is True
class shell:
@mock_pty(insert_os=True)
def defaults_to_bash_or_cmdexe_when_pty_True(self, mock_os):
# NOTE: yea, windows can't run pty is true, but this is really
# testing config behavior, so...meh
self._run(_, pty=True)
_expect_platform_shell(mock_os.execve.call_args_list[0][0][0])
@mock_subprocess(insert_Popen=True)
def defaults_to_bash_or_cmdexe_when_pty_False(self, mock_Popen):
self._run(_, pty=False)
_expect_platform_shell(
mock_Popen.call_args_list[0][1]['executable']
)
@mock_pty(insert_os=True)
def may_be_overridden_when_pty_True(self, mock_os):
self._run(_, pty=True, shell='/bin/zsh')
assert mock_os.execve.call_args_list[0][0][0] == '/bin/zsh'
@mock_subprocess(insert_Popen=True)
def may_be_overridden_when_pty_False(self, mock_Popen):
self._run(_, pty=False, shell='/bin/zsh')
assert mock_Popen.call_args_list[0][1]['executable'] == '/bin/zsh'
class env:
# NOTE: update-vs-replace semantics are tested 'purely' up above in
# regular Runner tests.
@mock_subprocess(insert_Popen=True)
def uses_Popen_kwarg_for_pty_False(self, mock_Popen):
self._run(_, pty=False, env={'FOO': 'BAR'})
expected = dict(os.environ, FOO='BAR')
env = mock_Popen.call_args_list[0][1]['env']
assert env == expected
@mock_pty(insert_os=True)
def uses_execve_for_pty_True(self, mock_os):
type(mock_os).environ = {'OTHERVAR': 'OTHERVAL'}
self._run(_, pty=True, env={'FOO': 'BAR'})
expected = {'OTHERVAR': 'OTHERVAL', 'FOO': 'BAR'}
env = mock_os.execve.call_args_list[0][0][2]
assert env == expected
class Result_:
def nothing_is_required(self):
Result()
def first_posarg_is_stdout(self):
assert Result("foo").stdout == "foo"
def command_defaults_to_empty_string(self):
assert Result().command == ""
def shell_defaults_to_empty_string(self):
assert Result().shell == ""
def env_defaults_to_empty_dict(self):
assert Result().env == {}
def stdout_defaults_to_empty_string(self):
assert Result().stdout == u""
def stderr_defaults_to_empty_string(self):
assert Result().stderr == u""
def exited_defaults_to_zero(self):
assert Result().exited == 0
def pty_defaults_to_False(self):
assert Result().pty is False
def repr_contains_useful_info(self):
assert repr(Result(command="foo")) == "<Result cmd='foo' exited=0>"
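# Hedged usage sketch (not part of the upstream suite): constructing a Result
# directly shows the defaults asserted above. Assumes `invoke` exposes Result
# at the top level, as recent releases do.
def _result_defaults_sketch():
    from invoke import Result
    r = Result("foo")
    assert r.stdout == "foo" and r.exited == 0 and r.pty is False
    return repr(r)  # e.g. "<Result cmd='' exited=0>"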
# ==== File: icecreamratings_projcet/icecreamratings_projcet/users/urls.py (repo: wlgud0402/studying-django-with-twoscoops, MIT) ====
from django.urls import path
from icecreamratings_projcet.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
|
[
"[email protected]"
] | |
46061121dc8d4f34e77a91f018bcfd036c3bfeb4
|
2f963d7989749037a3ec27aaa39b31416b33cbb2
|
/ib_users/views/get_user_details_from_usernames/tests/__init__.py
|
9ac1df71a9a7863cc83f6fffe550618bd1f21516
|
[] |
no_license
|
migsantos121/phd3-backend
|
3cd014908856c995de3c4473d82059bc9c1b5794
|
9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e
|
refs/heads/master
| 2022-12-12T17:25:59.334509 | 2020-03-09T09:24:08 | 2020-03-09T09:24:08 | 245,991,086 | 0 | 0 | null | 2022-06-28T14:45:50 | 2020-03-09T09:17:18 |
Python
|
UTF-8
|
Python
| false | false | 304 |
py
|
# Endpoint Configuration
APP_NAME = "ib_users"
OPERATION_NAME = "get_user_details_from_usernames"
REQUEST_METHOD = "post"
URL_SUFFIX = "users/usernames/"
from .test_case_01 import TestCase01GetUserDetailsFromUsernamesAPITestCase
__all__ = [
"TestCase01GetUserDetailsFromUsernamesAPITestCase"
]
|
[
"[email protected]"
] | |
192579fc38cc68f8e715b2436b91946626f05247
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/training_room/training_room.py
|
cc3e095607ac84d12dfecb24873a6a8013f954a7
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 68 |
py
|
/home/openerp/production/extra-addons/training_room/training_room.py
|
[
"[email protected]"
] | |
8f3a268d0ec0eaaf90cc0c89f0fe0766ee974c5a
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/xlsxwriter/test/utility/test_xl_col_to_name.py
|
2825d4b5c9fe0888c7aa52ba32d837d01dc4e18d
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 |
NOASSERTION
| 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null |
UTF-8
|
Python
| false | false | 1,515 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
import unittest
import warnings
from ...utility import xl_col_to_name
class TestUtility(unittest.TestCase):
"""
Test xl_col_to_name() utility function.
"""
def test_xl_col_to_name(self):
"""Test xl_col_to_name()"""
tests = [
# col, col string
(0, 'A'),
(1, 'B'),
(2, 'C'),
(9, 'J'),
(24, 'Y'),
(25, 'Z'),
(26, 'AA'),
(254, 'IU'),
(255, 'IV'),
(256, 'IW'),
(16383, 'XFD'),
(16384, 'XFE'),
(-1, None),
]
for col, string in tests:
exp = string
got = xl_col_to_name(col)
# Ignore the warnings for negative values.
warnings.filterwarnings('ignore')
self.assertEqual(got, exp)
def test_xl_col_to_name_abs(self):
"""Test xl_col_to_name() with absolute references"""
tests = [
# col, col_abs, col string
(0, True, '$A'),
(-1, True, None),
]
for col, col_abs, string in tests:
exp = string
got = xl_col_to_name(col, col_abs)
# Ignore the warnings for negative values.
warnings.filterwarnings('ignore')
self.assertEqual(got, exp)
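# Worked example (illustrative): column strings are a bijective base-26
# encoding of the zero-based index, so 25 -> 'Z', 26 -> 'AA' (one full pass
# of 'A'..'Z' consumed), and 16383 -> 'XFD', the last Excel 2007+ column.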
|
[
"[email protected]"
] | |
acfe1e5fdabe708e131174f39c9bc40f741babb0
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_maltreating.py
|
6f0f0ae372087c9a1569c183fc0592e2807f290b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
from xai.brain.wordbase.verbs._maltreat import _MALTREAT
#calss header
class _MALTREATING(_MALTREAT, ):
def __init__(self,):
_MALTREAT.__init__(self)
self.name = "MALTREATING"
self.specie = 'verbs'
self.basic = "maltreat"
self.jsondata = {}
|
[
"[email protected]"
] | |
26b7a7b1e9f6bb8b0d7ab6d73e12dc2bffdcb7e7
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/run/bulk_order/insert_canceled_SjSzNHG-2.py
|
c25c05a95105d64ba05e733196db6e86eccf3877
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,214 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
import random
import time
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryStkpriceDBNHG import QueryStkSz
from QueryStkpriceDBNHG import QueryStkSh
# Total number of orders to place
order_count = 50
# Order counter
count = 0
# Holds the original xtpids used for order cancellation
xtpids = []
stk_sz = QueryStkSz()
stk_info_sz = [ele for ele in stk_sz][0:10]
stk_sh = QueryStkSh()
stk_info_sh = [ele for ele in stk_sh][0:10]
class Order(xtp_test_case):
def test_order(self):
# insert_all_traded()
insert_all_canceled()
#insert_all_random()
# Fully filled orders
def insert_all_traded():
global count
while count < order_count:
for i in xrange(len(stk_info_sz)):
all_traded_common(stk_info_sz, i, 2)
for i in xrange(len(stk_info_sh)):
all_traded_common(stk_info_sh, i, 1)
def all_traded_common(stk_list, index, market):
global count
    # Map the market code (1 = Shanghai, otherwise Shenzhen) to the XTP constant
    market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
        Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A']
if count == order_count:
return
count += 1
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': 2,
        'market': market,
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
Api.trade.InsertOrder(wt_reqs)
# Cancel orders
def insert_all_canceled():
count = 0
while count < order_count:
for i in xrange(len(stk_info_sz)):
print 'sz------'
print stk_info_sz, i
all_canceled_common(stk_info_sz, i, 2)
for i in xrange(len(stk_info_sh)):
print 'sh------'
all_canceled_common(stk_info_sh, i, 1)
count += 1
for xtpid in xtpids:
print xtpid
Api.trade.CancelOrder(xtpid)
def all_canceled_common(stk_list, index, market):
global xtpids
    # Map the market code (1 = Shanghai, otherwise Shenzhen) to the XTP constant
    market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
        Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A']
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': 1,
        'market': market,
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
xtpid = Api.trade.InsertOrder(wt_reqs)
xtpids.append(xtpid)
# Place orders cycling through the various fill types
def insert_all_random():
    trade_typ_sz = [1, 2, 2, 3, 5]  # 1 = order confirmed, 2 = fully filled, 3 = partially filled, 5 = rejected
    trade_typ_sh = [1, 2, 2, 3, 4]  # 1 = order confirmed, 2 = fully filled, 3 = partially filled, 4 = rejected
while count < order_count:
for i in xrange(len(stk_info_sz)):
all_random_common(stk_info_sz, i, 2, trade_typ_sz)
for i in xrange(len(stk_info_sh)):
all_random_common(stk_info_sh, i, 1, trade_typ_sh)
def all_random_common(stk_list, index, market, trade_type):
global count
    # Map the market code (1 = Shanghai, otherwise Shenzhen) to the XTP constant
    market = Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'] if market == 1 else \
        Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A']
if count == order_count:
return
count += 1
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE[
'XTP_BUSINESS_TYPE_CASH'],
'order_client_id': trade_type[index % len(trade_type)],
        'market': market,
'ticker': stk_list[index][0],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stk_list[index][1] / 10000.0,
'quantity': 200
}
Api.trade.InsertOrder(wt_reqs)
# time.sleep(0.01)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
154921eb4442071015372678277580623417e7dc
|
7aefdc2a315f682ff03103b9364103fe88eae76a
|
/harmony/settings/aws.py
|
6143b4c0647139e87abe907a5c45814a4906580e
|
[] |
no_license
|
johnfelipe/HarmonyLab
|
032913d60a22909db745ea0b3bc09ab49ce81368
|
8a679ac611a18f08ecae6af58cf23dc32cdac7a1
|
refs/heads/master
| 2020-12-11T02:02:48.100012 | 2014-03-13T20:26:37 | 2014-03-13T20:26:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 75 |
py
|
# Amazon Web Services (AWS) settings
from harmony.settings.common import *
|
[
"[email protected]"
] | |
572247a7fae95f6c0a8df09df833154268dba90e
|
5995eb33f64467273db3b63beaf70e648a053fd0
|
/Bai6.py
|
b60a67dfbd34d8e2fb2b5de0a546a87a43de4fc7
|
[] |
no_license
|
lbbquoc/Python-Lab
|
c0068a1c81590850fb2893c01685ee6ea07db9c5
|
8b227ebc1e38c0080abef9284f07717d338e3015
|
refs/heads/master
| 2022-04-17T11:25:19.011062 | 2020-04-21T02:20:16 | 2020-04-21T02:20:16 | 257,454,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,596 |
py
|
import math
import random
class Point2D:
def __init__(self,x,y):
self.x = x
self.y = y
def CalDistance(x1,y1,x2,y2):
diemA = Point2D(x1,y1)
diemB = Point2D(x2,y2)
return round(math.sqrt(pow(diemB.x - diemA.x,2) + pow(diemB.y - diemA.y, 2)),2)
# round() rounds a number; syntax: round(<number>, <number of decimal places>)
def createTriangle(x1,y1,x2,y2,x3,y3):
AB = CalDistance(x1,y1,x2,y2)
AC = CalDistance(x1,y1,x3,y3)
BC = CalDistance(x2,y2,x3,y3)
print(AB,AC,BC)
    if (AB + BC > AC and AB + AC > BC and BC + AC > AB):
        return True
    else:
        return False
def calCircumference(a,b,c):
return a+b+c
def calArea(a,b,c):
p = calCircumference(a,b,c) / 2
return math.sqrt(p*(p-a)*(p-b)*(p-c))
def whatIsTriangle(x1,y1,x2,y2,x3,y3):
if createTriangle(x1,y1,x2,y2,x3,y3):
AB = CalDistance(x1,y1,x2,y2)
AC = CalDistance(x1,y1,x3,y3)
BC = CalDistance(x2,y2,x3,y3)
if (AB == AC == BC):
print("It is equilateral triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
if ((AB == AC and AB != BC) or (AB == BC and AB != AC) or (BC == AC and BC != AB)):
print("It is isosceles triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
if (AB**2 + BC**2 == AC**2) or (AB**2 + AC**2 == BC**2) or (BC**2 + AC**2 == AB**2):
print("It is right triangle\n")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
else:
print("It's normal triangle !")
print("Circumference is : ",calCircumference(AB,BC,AC))
print("Area is:", calArea(AB,BC,AC))
return
else:
return
def GiaiPhuongTrinh2An(x1,y1,x2,y2):
    # line equation y = Ax + B
list = []
D = x1*1 - 1*x2
Dx = y1*1 - 1*y2
Dy = x1*y2 - y1*x2
print(D,Dx,Dy)
print()
    if (D == 0 and Dx == 0):
        print("The system has infinitely many solutions!")
        return
    if (D == 0 and Dx != 0):
        print("The system has no solution!")
        return
    if (D != 0):
        print("The system has a unique solution: ")
n1 = Dx / D
n2 = Dy / D
print(n1,n2)
list.insert(0,n1)
list.insert(0,n2)
list.reverse()
return list
#list = GiaiPhuongTrinh2An(1,2,4,8)
def vtTuongDoiCua2DT(x1,y1,x2,y2,x3,y3,x4,y4):
listAB = GiaiPhuongTrinh2An(x1,y1,x2,y2)
listCD = GiaiPhuongTrinh2An(x3,y3,x4,y4)
if((listCD[0]/listAB[0] == -1/-1) and (listCD[1]/listAB[1] == -1/-1) ):
print("AB trung CD")
if (listCD[0]/listAB[0] == -1/-1) and (listCD[1]/listAB[1] != -1/-1) :
print("AB // CD")
if listCD[0]/listAB[0] != -1/-1 :
print("AB cat CD")
# the reason b2/b1 always equals -1/-1: we use the equation y = ax + b (moving
# y to the right-hand side always gives its coefficient the value -1)
# x1 = round(random.randint(1,100),0)
# x2 = random.randint(1,100)
# x3 = random.randint(1,100)
# y1 = random.randint(1,100)
# y2 = random.randint(1,100)
# y3 = random.randint(1,100)
# x4 = random.randint(1,100)
# y4 = random.randint(1,100)
# print(x1)
# print(createTriangle(x1,y1,x2,y2,x3,y3))
# whatIsTriangle(x1,y1,x2,y2,x3,y3)
# vtTuongDoiCua2DT(x1,y1,x2,y2,x3,y3,x4,y4)
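# Hedged usage sketch: the coordinates below are illustrative (a 3-4-5 right
# triangle, and two parallel lines y = x + 1 and y = x + 3).
if __name__ == "__main__":
    whatIsTriangle(0, 0, 3, 0, 0, 4)          # expected: right triangle, circumference 12.0, area 6.0
    vtTuongDoiCua2DT(0, 1, 1, 2, 0, 3, 1, 4)  # expected: "AB // CD"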
|
[
"="
] |
=
|
de14d2a3fe9fafb2f89c91e98c86b209c7ff70be
|
cbe264842df4eae3569b28ed4aae9489014ed23c
|
/RedBook/ch2/my_httpserver.py
|
5b49fd27d00328f1582ef2d9d776d3770f7b1a2f
|
[
"MIT"
] |
permissive
|
zeroam/TIL
|
31e176c2f4c3e1ef72b1155353690cc2f7160f96
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
refs/heads/master
| 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 479 |
py
|
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response_only(200, 'OK')
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(b"Hello World")
if __name__ == "__main__":
server = HTTPServer(('', 8888), MyHandler)
print("Started WebServer on port 8888...")
print("Press ^C to quit WebServer.")
server.serve_forever()
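# Hedged usage note: with the server running, an HTTP GET such as
#   curl http://localhost:8888/
# should return "Hello World" (port 8888 as configured above).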
|
[
"[email protected]"
] | |
d28335641d8721cac96e45ba527c31bd369024bc
|
c7d6fbccc4e72a2fb3ab9f5dc31bb74e6c86fc1e
|
/bw2regional/xtables.py
|
8a814fe1a3f3211224e8ac45a81cb62d52480171
|
[] |
permissive
|
brightway-lca/brightway2-regional
|
5c97f5fd1284254e53ef104c12ce2395886f439c
|
d11bea7555b915348aff6432de6afe9034271256
|
refs/heads/main
| 2023-06-09T17:27:01.732840 | 2023-04-28T13:49:29 | 2023-04-28T13:49:29 | 246,269,473 | 2 | 2 |
BSD-3-Clause
| 2023-05-24T14:51:24 | 2020-03-10T10:18:53 |
Python
|
UTF-8
|
Python
| false | false | 1,601 |
py
|
from .loading import Loading
from .meta import extension_tables, geocollections
from .validate import xtable_validator
class ExtensionTable(Loading):
_metadata = extension_tables
validator = xtable_validator
matrix = "xtable_matrix"
@property
def filename(self):
return super(ExtensionTable, self).filename.replace(".loading", ".xtable")
def write_to_map(self, *args, **kwargs):
raise NotImplementedError
def import_from_map(self, mask=None):
from .utils import get_pandarus_map
geocollection = extension_tables[self.name].get("geocollection")
xt_field = extension_tables[self.name].get("xt_field")
if not geocollection:
raise ValueError("No geocollection for this extension table")
if geocollections[geocollection].get('kind') == 'raster':
raise ValueError("This function is only for vectors.")
map_obj = get_pandarus_map(geocollection)
data = []
if xt_field is None:
raise ValueError("No `xt_field` field name specified")
id_field = geocollections[geocollection].get("field")
if not id_field:
raise ValueError(
"Geocollection must specify ``field`` field name for unique feature ids"
)
for feature in map_obj:
label = feature["properties"][id_field]
value = float(feature["properties"][xt_field])
if mask is not None and value == mask:
continue
data.append((value, (geocollection, label)))
self.write(data)
|
[
"[email protected]"
] | |
d06ab9b80ef1af8c850a34a8d5715c716e3849ee
|
a9f767c9abe9ef645b505ec33661b815e8021432
|
/kaybee/plugins/widgets/handlers.py
|
6d901d3bdb92ff21877ac86629873051028df02e
|
[
"Apache-2.0"
] |
permissive
|
pauleveritt/kaybee
|
bcd402a1f28e3e37f42217d9550c0981a494bfe4
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
refs/heads/master
| 2022-06-18T04:58:52.286306 | 2018-08-21T13:52:10 | 2018-08-21T13:52:10 | 115,625,247 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,484 |
py
|
import inspect
import os
from typing import List
from docutils import nodes
from docutils.readers import doctree
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.environment import BuildEnvironment
from sphinx.jinja2glue import SphinxFileSystemLoader
from kaybee.app import kb
from kaybee.plugins.events import SphinxEvent
from kaybee.plugins.widgets.action import WidgetAction
from kaybee.plugins.widgets.node import widget
from kaybee.plugins.widgets.directive import WidgetDirective
@kb.event(SphinxEvent.BI, scope='widgets')
def add_widget_node(kb_app: kb, sphinx_app: Sphinx):
sphinx_app.add_node(widget)
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=40)
def initialize_widgets_container(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
if not hasattr(sphinx_app.env, 'widgets'):
sphinx_app.env.widgets = dict()
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=50)
def register_template_directory(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
""" Add this widget's templates dir to template paths """
template_bridge = sphinx_app.builder.templates
actions = WidgetAction.get_callbacks(kb_app)
for action in actions:
f = os.path.dirname(inspect.getfile(action))
template_bridge.loaders.append(SphinxFileSystemLoader(f))
@kb.event(SphinxEvent.EBRD, scope='widgets', system_order=60)
def register_widget_directive(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
# Register a directive
for k, v in list(kb_app.config.widgets.items()):
sphinx_app.add_directive(k, WidgetDirective)
@kb.event(SphinxEvent.DRES, scope='widgets')
def render_widgets(kb_app: kb,
sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str,
):
""" Go through docs and replace widget directive with rendering """
builder: StandaloneHTMLBuilder = sphinx_app.builder
for node in doctree.traverse(widget):
# Render the output
w = sphinx_app.env.widgets.get(node.name)
context = builder.globalcontext.copy()
# Add in certain globals
context['resources'] = sphinx_app.env.resources
context['references'] = sphinx_app.env.references
output = w.render(sphinx_app, context)
# Put the output into the node contents
listing = [nodes.raw('', output, format='html')]
node.replace_self(listing)
@kb.dumper('widgets')
def dump_settings(kb_app: kb, sphinx_env: BuildEnvironment):
# First get the kb app configuration for widgets
config = {
k: v.__module__ + '.' + v.__name__
for (k, v) in kb_app.config.widgets.items()
}
# Next, get the actual widgets in the app.widgets DB
widgets = sphinx_env.widgets
values = {k: v.__json__() for (k, v) in widgets.items()}
widgets = dict(
config=config,
values=values
)
return dict(widgets=widgets)
|
[
"[email protected]"
] | |
db82a4ebbfaf27c4005b0b52244b7e7b6cef93d7
|
e70a17e8a37847a961f19b136f3bbe74393fa2af
|
/RPI/build/image_view/catkin_generated/pkg.develspace.context.pc.py
|
9059bfd521c98ccd13c2659ef88db64293193e9a
|
[
"MIT"
] |
permissive
|
Mondiegus/ROS-4x4-CAR-AI
|
1413ead6f46a8b16005abeea3e0b215caa45f27e
|
124efe39168ce96eec13d57e644f4ddb6dfe2364
|
refs/heads/Master
| 2023-07-14T23:56:53.519082 | 2021-03-27T17:28:45 | 2021-03-27T17:28:45 | 334,233,839 | 0 | 0 |
MIT
| 2021-02-02T13:00:30 | 2021-01-29T18:46:16 |
Makefile
|
UTF-8
|
Python
| false | false | 512 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pi/catkin_ws/devel/.private/image_view/include".split(';') if "/home/pi/catkin_ws/devel/.private/image_view/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "image_view"
PROJECT_SPACE_DIR = "/home/pi/catkin_ws/devel/.private/image_view"
PROJECT_VERSION = "1.15.3"
|
[
"[email protected]"
] | |
4f6ae9cc7f5fb40a62904cfab554ac8996d10568
|
f802c49ab73cadfab92253e2c5c7d2dd96180576
|
/mcq_v2/mcq_v2/forms.py
|
cd294af3dc24e988ee58ed79e830f8d0afeb25d4
|
[] |
no_license
|
anushiv25/Coding-IDE
|
258ed2a0f1fa3ceba97981e543ce665bc39f280e
|
81cacbe350d426680a157a9eb617641eb92d3f28
|
refs/heads/master
| 2021-11-05T19:43:08.175946 | 2019-02-19T14:14:50 | 2019-02-19T14:14:50 | 170,346,050 | 0 | 1 | null | 2021-10-31T18:26:18 | 2019-02-12T15:54:30 |
CSS
|
UTF-8
|
Python
| false | false | 2,314 |
py
|
from django import forms
from django.contrib.auth import get_user_model
User=get_user_model()
CHOICES= [
('CS', 'Computer Science'),
('IT', 'Information Technology'),
('Civil', 'Civil'),
('Mechanical', 'Mechanical'),
('EC', 'Electronics & Communication'),
]
class signup_form(forms.Form):
name=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-name"}))
branch=forms.CharField(widget=forms.Select(choices=CHOICES,attrs={"class":"multiple","id":"defaultForm-college"}))
#branch=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-branch"}))
year=forms.DecimalField(max_value=3,min_value=1,widget=forms.NumberInput(attrs={"class":"form-control","id":"defaultForm-year"}))
college=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-college"}))
email=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-email"}))
password=forms.CharField(min_length=8,widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-password"}))
confirm_password=forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-confirm_pass"}))
def clean_email(self):
email=self.cleaned_data.get("email")
qs=User.objects.filter(username=email)
if qs.exists():
raise forms.ValidationError("Email Taken !")
elif ("@" not in email) or (".com" not in email):
raise forms.ValidationError("Please Enter a Valid Email !")
return email
# def clean(self):
# data=self.cleaned_data
# password=self.cleaned_data.get("password")
# password1=self.cleaned_data.get("confirm_password")
# if password1 != password:
# raise forms.ValidationError("Password must Match !")
# return data
    def clean_confirm_password(self):
        password = self.cleaned_data.get('password')
        password1 = self.cleaned_data.get('confirm_password')
        if password1 != password:
            raise forms.ValidationError("Password must Match !!")
        # Return this field's cleaned value rather than the whole cleaned_data dict
        return password1
class login_form(forms.Form):
email=forms.CharField(widget=forms.TextInput(attrs={"class":"form-control","id":"defaultForm-email"}))
password=forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","id":"defaultForm-pass"}))
|
[
"[email protected]"
] | |
7953dfc4ee37a3ff7e911c16aee6627b68443f3b
|
ff734af8ae77fd1e0af7ebee85ec7321b50cbdea
|
/challenges/repeated_word/repeated_word.py
|
9e56bac66e9f3d2f73da71faebc510050dcaa536
|
[] |
no_license
|
LeoKuhorev/data-structures-and-algorithms
|
005c0ae4a84762437b966d284fb28d3cf5a17b17
|
59d9d9ccc35ef7e475aeea820f1800db8bf42807
|
refs/heads/master
| 2023-01-20T12:46:03.141941 | 2022-05-16T21:44:08 | 2022-05-16T21:44:08 | 215,949,237 | 0 | 0 | null | 2023-01-07T15:28:59 | 2019-10-18T05:42:41 |
JavaScript
|
UTF-8
|
Python
| false | false | 658 |
py
|
def repeated_word(text: str) -> str:
"""Return first repeated word in the given text. If all words are unique returns None
Args:
text (str): Given text
Raises:
TypeError: If the passed in text is not a string
Returns:
str: First repeated word
"""
if not type(text) is str:
raise TypeError('text must be a string!')
text_lst = text.split(' ')
existing_words = set()
for word in text_lst:
        word = ''.join(char for char in word if char.isalnum()).lower()
if word in existing_words:
return word
else:
existing_words.add(word)
return None
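# Minimal usage sketch; the sample sentence is illustrative.
if __name__ == "__main__":
    print(repeated_word("It was the best of times, it was the worst of times"))  # -> "it"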
|
[
"[email protected]"
] | |
59c6e82f2a13acca52e2f000798ad80f03cfdc58
|
b33d1d4b74d375a2050baf80cda5b8571aff7462
|
/s14/day02/集合.py
|
2fee61ee0195463eae45991ab61b369b0541380b
|
[] |
no_license
|
sunwang33/code
|
e979e1b11209200fba07a99d926d76f09c83b514
|
377f3e919555bf0f02ef56c9395d57992c84fcfd
|
refs/heads/master
| 2021-01-16T18:10:08.358744 | 2018-01-01T02:58:43 | 2018-01-01T02:58:43 | 100,045,002 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
__author__ = "sun wang"
list_1 = [1,4,5,7,3,6,7,9]
list_1 = set(list_1)
print(list_1 , type(list_1))
list_2 = set([2,6,0,66,22,4])
print(list_1 , list_2 )
print(list_1.intersection(list_2))
print(list_1.union(list_2))
print(list_1.difference(list_2))
print(list_2.difference(list_1))
print(list_1.issubset(list_2))
print(list_1.issuperset(list_2))
list_3 = set([1,3,7])
print(list_3.issubset(list_1))
print (list_1.symmetric_difference(list_2))
name_1="分割线"
print(name_1.center(50,'-'))
list_4 = set([5,6,8])
print(list_3.isdisjoint(list_4))
# Set operators: & intersection, | union, - difference, ^ symmetric difference
print (list_1 & list_2)
print (list_1 | list_2 )
print (list_1 - list_2 )
print (list_1 ^ list_2)
list_1.add(999)
print (list_1)
list_1.update([10,37,42])
print(list_1)
list_1.remove(42)
print(list_1)
list_1.add(42)
print(list_1)
print(len(list_1))
if 42 in list_1:
print ("42 in list_1")
print(list_1.pop())
print(list_1)
list_1.discard(888)
list_1.discard(3)
print(list_1)
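# Note: unlike remove(), discard() above does not raise KeyError when the
# element is absent, which is why discard(888) is safe even though 888 was
# never added to the set.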
|
[
"[email protected]"
] | |
605c5ee633eb838f3f770819ac8122cce8f0f6d6
|
078918048099dfa2454cfac2d449ea3d77fbec55
|
/1392-longest-happy-prefix.py
|
c6fd8935b35347c62acda8c328e994b31284a44b
|
[] |
no_license
|
DmitryVlaznev/leetcode
|
931784dcc4b465eebda7d22311f5bf5fa879f068
|
b2a2afdfc725330545c9a2869fefc7d45ec594bc
|
refs/heads/master
| 2023-06-10T05:42:34.992220 | 2023-06-05T09:54:10 | 2023-06-05T09:54:30 | 241,064,389 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,419 |
py
|
# 1392. Longest Happy Prefix
# A string is called a happy prefix if is a non-empty prefix which is
# also a suffix (excluding itself).
# Given a string s. Return the longest happy prefix of s .
# Return an empty string if no such prefix exists.
# Example 1:
# Input: s = "level"
# Output: "l"
# Explanation: s contains 4 prefix excluding itself ("l", "le", "lev",
# "leve"), and suffix ("l", "el", "vel", "evel"). The largest prefix
# which is also suffix is given by "l".
# Example 2:
# Input: s = "ababab"
# Output: "abab"
# Explanation: "abab" is the largest prefix which is also suffix. They
# can overlap in the original string.
# Example 3:
# Input: s = "leetcodeleet"
# Output: "leet"
# Example 4:
# Input: s = "a"
# Output: ""
# Constraints:
# 1 <= s.length <= 10^5
# s contains only lowercase English letters.
class Solution:
def longestPrefix(self, s: str) -> str:
pf = [0] * len(s)
for i in range(1, len(s)):
j = pf[i - 1]
while j > 0 and s[i] != s[j]: j = pf[j - 1]
            if s[i] == s[j]: j += 1
pf[i] = j
return s[:pf[-1]]
def log(correct, res):
if correct == res:
print("[v]", res)
else:
print(">>> INCORRECT >>>", correct, " | ", res)
t = Solution()
log("l", t.longestPrefix("level"))
log("abab", t.longestPrefix("ababab"))
log("leet", t.longestPrefix("leetcodeleet"))
log("", t.longestPrefix("a"))
|
[
"[email protected]"
] | |
36fa4966ad339ed2e49810b3390600b44cf16427
|
c9fe05f893deff75232aabca4e877c144972249a
|
/arcpyenv/arcgispro-py3-clone/Lib/site-packages/arcgis/features/_analysis.py
|
67fb3985552a133f2d50b0570a1f0c58b80be5a8
|
[
"Python-2.0"
] |
permissive
|
SherbazHashmi/HackathonServer
|
4d1dc7f0122a701a0f3a17787d32efe83bc67601
|
a874fe7e5c95196e4de68db2da0e2a05eb70e5d8
|
refs/heads/master
| 2022-12-26T06:46:33.893749 | 2019-11-03T10:49:47 | 2019-11-03T10:49:47 | 218,912,149 | 3 | 3 | null | 2022-12-11T11:52:37 | 2019-11-01T04:16:38 |
Python
|
UTF-8
|
Python
| false | false | 85,268 |
py
|
import logging as _logging
import arcgis
from datetime import datetime
from arcgis.features import FeatureSet
from arcgis.mapping import MapImageLayer
from arcgis.geoprocessing import DataFile, LinearUnit, RasterData
from arcgis.geoprocessing._support import _execute_gp_tool
_log = _logging.getLogger(__name__)
_use_async = True
def aggregate_points(point_layer: str = None,
polygon_layer: str = None,
keep_boundaries_with_no_points: bool = True,
summary_fields: str = """[]""",
group_by_field: str = None,
minority_majority: bool = False,
percent_points: bool = False,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
Aggregate points task allows you to aggregate or count the total number of points that are distributed within specified areas or boundaries (polygons). You can also summarize Sum, Mean, Min, Max and Standard deviation calculations for attributes of the point layer to understand the general characteristics of aggregated points.
Parameters:
point_layer: pointLayer (str). Required parameter. Point layer to be aggregated
polygon_layer: polygonLayer (str). Required parameter. Polygon layer to which the points should be aggregated.
keep_boundaries_with_no_points: keepBoundariesWithNoPoints (bool). Optional parameter. Specify whether the polygons without any points should be returned in the output.
summary_fields: summaryFields (str). Optional parameter. A list of field names and summary type. Example [ fieldName1 summaryType1,fieldName2 summaryType2].
group_by_field: groupByField (str). Optional parameter. A field name from PointLayer based on which the points will be grouped.
minority_majority: minorityMajority (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If true, the minority (least dominant) or the majority (most dominant) attribute values within each group, within each boundary will be calculated.
percent_points: percentPoints (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If set to true, the percentage count of points for each unique groupByField value is calculated.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
aggregated_layer - aggregatedLayer as a str
group_summary - groupSummary as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/AggregatePoints.htm for additional help.
"""
kwargs = locals()
param_db = {
"point_layer": (str, "pointLayer"),
"polygon_layer": (str, "polygonLayer"),
"keep_boundaries_with_no_points": (bool, "keepBoundariesWithNoPoints"),
"summary_fields": (str, "summaryFields"),
"group_by_field": (str, "groupByField"),
"minority_majority": (bool, "minorityMajority"),
"percent_points": (bool, "percentPoints"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"aggregated_layer": (str, "aggregatedLayer"),
"group_summary": (str, "groupSummary"),
}
return_values = [
{"name": "aggregated_layer", "display_name": "aggregatedLayer", "type": str},
{"name": "group_summary", "display_name": "groupSummary", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "AggregatePoints", kwargs, param_db, return_values, _use_async, url)
def find_hot_spots(analysis_layer: str = None,
analysis_field: str = None,
divided_by_field: str = None,
bounding_polygon_layer: str = None,
aggregation_polygon_layer: str = None,
output_name: str = None,
context: str = None,
gis=None,
shape_type: str = None) -> tuple:
"""
The Find Hot Spots task finds statistically significant clusters of incident points, weighted points, or weighted polygons. For incident data, the analysis field (weight) is obtained by aggregation. Output is a hot spot map.
Parameters:
analysis_layer: analysis layer (str). Required parameter. The point or polygon feature layer for which hot spots will be calculated.
analysis_field: analysis field (str). Optional parameter. The numeric field in the AnalysisLayer that will be analyzed.
divided_by_field: divided by field (str). Optional parameter.
bounding_polygon_layer: bounding polygon layer (str). Optional parameter. When the analysis layer is points and no AnalysisField is specified, you can provide polygons features that define where incidents could have occurred.
aggregation_polygon_layer: aggregation polygon layer (str). Optional parameter. When the AnalysisLayer contains points and no AnalysisField is specified, you can provide polygon features into which the points will be aggregated and analyzed, such as administrative units.
output_name: output name (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
shape_type: shape type (str). Optional parameter. The shape of the polygon mesh the input features will be aggregated into.
Choice list:['hexagon', 'fishnet']
Returns the following as a named tuple:
hot_spots_result_layer - hotSpotsResultLayer as a str
process_info - processInfo as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/FindHotSpots.htm for additional help.
"""
kwargs = locals()
param_db = {
"analysis_layer": (str, "analysisLayer"),
"analysis_field": (str, "analysisField"),
"divided_by_field": (str, "dividedByField"),
"bounding_polygon_layer": (str, "boundingPolygonLayer"),
"aggregation_polygon_layer": (str, "aggregationPolygonLayer"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"hot_spots_result_layer": (str, "hotSpotsResultLayer"),
"process_info": (str, "processInfo"),
"shape_type": (str, "shapeType"),
}
return_values = [
{"name": "hot_spots_result_layer", "display_name": "hotSpotsResultLayer", "type": str},
{"name": "process_info", "display_name": "processInfo", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "FindHotSpots", kwargs, param_db, return_values, _use_async, url)
def create_buffers(input_layer: str = None,
distances: str = """[]""",
field: str = None,
units: str = """Meters""",
dissolve_type: str = """None""",
ring_type: str = """Disks""",
side_type: str = """Full""",
end_type: str = """Round""",
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Creates buffer polygon(s) around input features.
Parameters:
input_layer: inputLayer (str). Required parameter. The input to be buffered.
distances: distances (str). Optional parameter. The distance(s) that will be buffered.
field: field (str). Optional parameter. Buffers will be created using field values.
units: units (str). Optional parameter. The linear unit to be used with the distance value(s).
Choice list:['Feet', 'Kilometers', 'Meters', 'Miles', 'NauticalMiles', 'Yards']
dissolve_type: dissolveType (str). Optional parameter. Specifies the dissolve to be performed to remove buffer overlap.
Choice list:['None', 'Dissolve', 'Split']
ring_type: ringType (str). Optional parameter. The ring type.
Choice list:['Disks', 'Rings']
side_type: sideType (str). Optional parameter. The side(s) of the input that will be buffered.
Choice list:['Full', 'Left', 'Right', 'Outside']
end_type: endType (str). Optional parameter. The shape of the buffer at the end of buffered line features.
Choice list:['Round', 'Flat']
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
buffer_layer - bufferLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/CreateBuffers.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"distances": (str, "distances"),
"field": (str, "field"),
"units": (str, "units"),
"dissolve_type": (str, "dissolveType"),
"ring_type": (str, "ringType"),
"side_type": (str, "sideType"),
"end_type": (str, "endType"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"buffer_layer": (str, "bufferLayer"),
}
return_values = [
{"name": "buffer_layer", "display_name": "bufferLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "CreateBuffers", kwargs, param_db, return_values, _use_async, url)
def create_drive_time_areas(input_layer: str = None,
break_values: str = """[5, 10, 15]""",
break_units: str = """Minutes""",
travel_mode: str = """Driving""",
overlap_policy: str = """Overlap""",
time_of_day: datetime = None,
time_zone_for_time_of_day: str = """GeoLocal""",
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Parameters:
input_layer: inputLayer (str). Required parameter.
break_values: breakValues (str). Optional parameter.
break_units: breakUnits (str). Optional parameter.
Choice list:['Minutes', 'Seconds', 'Hours', 'Miles', 'Kilometers', 'Meters', 'Feet', 'Yards']
travel_mode: travelMode (str). Optional parameter.
overlap_policy: overlapPolicy (str). Optional parameter.
Choice list:['Overlap', 'Dissolve', 'Split']
time_of_day: timeOfDay (datetime). Optional parameter.
time_zone_for_time_of_day: timeZoneForTimeOfDay (str). Optional parameter.
Choice list:['UTC', 'GeoLocal']
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
drive_time_areas_layer - driveTimeAreasLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/CreateDriveTimeAreas.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"break_values": (str, "breakValues"),
"break_units": (str, "breakUnits"),
"travel_mode": (str, "travelMode"),
"overlap_policy": (str, "overlapPolicy"),
"time_of_day": (datetime, "timeOfDay"),
"time_zone_for_time_of_day": (str, "timeZoneForTimeOfDay"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"drive_time_areas_layer": (str, "driveTimeAreasLayer"),
}
return_values = [
{"name": "drive_time_areas_layer", "display_name": "driveTimeAreasLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "CreateDriveTimeAreas", kwargs, param_db, return_values, _use_async, url)
def dissolve_boundaries(input_layer: str = None,
dissolve_fields: str = """[]""",
summary_fields: str = """[]""",
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Dissolve features based on specified fields.
Parameters:
input_layer: inputLayer (str). Required parameter. The layer containing polygon features that will be dissolved.
dissolve_fields: dissolveFields (str). Optional parameter. One or more fields from the input that control which polygons are merged. If no fields are supplied, all polygons that overlap or share a common border will be dissolved into one polygon.
summary_fields: summaryFields (str). Optional parameter. A list of field names and statistical types that will be used to summarize the output. Supported statistics include: Sum, Mean, Min, Max, and Stddev.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
dissolved_layer - dissolvedLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/DissolveBoundaries.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"dissolve_fields": (str, "dissolveFields"),
"summary_fields": (str, "summaryFields"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"dissolved_layer": (str, "dissolvedLayer"),
}
return_values = [
{"name": "dissolved_layer", "display_name": "dissolvedLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "DissolveBoundaries", kwargs, param_db, return_values, _use_async, url)
def merge_layers(input_layer: str = None,
merge_layer: str = None,
merging_attributes: str = """[]""",
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Combines two inputs of the same feature data type into a new output.
Parameters:
input_layer: inputLayer (str). Required parameter. The point, line, or polygon features to merge with the mergeLayer.
merge_layer: mergeLayer (str). Required parameter. The point, line or polygon features to merge with inputLayer. mergeLayer must contain the same feature type (point, line, or polygon) as the inputLayer.
merging_attributes: mergingAttributes (str). Optional parameter. An array of values that describe how fields from the mergeLayer are to be modified. By default all fields from both inputs will be carried across to the output.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
merged_layer - mergedLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/MergeLayers.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"merge_layer": (str, "mergeLayer"),
"merging_attributes": (str, "mergingAttributes"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"merged_layer": (str, "mergedLayer"),
}
return_values = [
{"name": "merged_layer", "display_name": "mergedLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "MergeLayers", kwargs, param_db, return_values, _use_async, url)
def summarize_within(sum_within_layer: str = None,
summary_layer: str = None,
sum_shape: bool = True,
shape_units: str = None,
summary_fields: str = """[]""",
group_by_field: str = None,
minority_majority: bool = False,
percent_shape: bool = False,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
The SummarizeWithin task helps you to summarize and find statistics on the point, line, or polygon features (or portions of these features) that are within the boundaries of polygons in another layer. For example: Given a layer of watershed boundaries and a layer of land-use boundaries by land-use type, calculate total acreage of land-use type for each watershed. Given a layer of parcels in a county and a layer of city boundaries, summarize the average value of vacant parcels within each city boundary. Given a layer of counties and a layer of roads, summarize the total mileage of roads by road type within each county.
Parameters:
sum_within_layer: sumWithinLayer (str). Required parameter. A polygon feature layer or featurecollection. Features, or portions of features, in the summaryLayer (below) that fall within the boundaries of these polygons will be summarized.
summary_layer: summaryLayer (str). Required parameter. Point, line, or polygon features that will be summarized for each polygon in the sumWithinLayer.
sum_shape: sumShape (bool). Optional parameter. A boolean value that instructs the task to calculate count of points, length of lines or areas of polygons of the summaryLayer within each polygon in sumWithinLayer.
shape_units: shapeUnits (str). Optional parameter. Specify units to summarize the length or areas when sumShape is set to true. Units is not required to summarize points.
Choice list:['Acres', 'Hectares', 'SquareMeters', 'SquareKilometers', 'SquareFeet', 'SquareYards', 'SquareMiles', 'Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
summary_fields: summaryFields (str). Optional parameter. A list of field names and statistical summary types that you wish to calculate for all features in the summaryLayer that are within each polygon in the sumWithinLayer, e.g. ["fieldname1 summary", "fieldname2 summary"]
group_by_field: groupByField (str). Optional parameter. Specify a field from the summaryLayer features to calculate statistics separately for each unique attribute value.
minority_majority: minorityMajority (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If true, the minority (least dominant) or the majority (most dominant) attribute values within each group, within each boundary will be calculated.
percent_shape: percentShape (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If set to true, the percentage of shape (eg. length for lines) for each unique groupByField value is calculated.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
result_layer - resultLayer as a str
group_by_summary - groupBySummary as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/SummarizeWithin.htm for additional help.
"""
kwargs = locals()
param_db = {
"sum_within_layer": (str, "sumWithinLayer"),
"summary_layer": (str, "summaryLayer"),
"sum_shape": (bool, "sumShape"),
"shape_units": (str, "shapeUnits"),
"summary_fields": (str, "summaryFields"),
"group_by_field": (str, "groupByField"),
"minority_majority": (bool, "minorityMajority"),
"percent_shape": (bool, "percentShape"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
"group_by_summary": (str, "groupBySummary"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
{"name": "group_by_summary", "display_name": "groupBySummary", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "SummarizeWithin", kwargs, param_db, return_values, _use_async, url)
def enrich_layer(input_layer: str = None,
data_collections: str = """[]""",
analysis_variables: str = """[]""",
country: str = None,
buffer_type: str = None,
distance: float = None,
units: str = None,
return_boundaries: bool = False,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
The Enrich Layer task enriches your data by getting facts about the people, places, and businesses that surround your data locations. For example: What kind of people live here? What do people like to do in this area? What are their habits and lifestyles? What kind of businesses are there in this area? The result will be a new layer of input features that includes all demographic and geographic information from given data collections.
Parameters:
input_layer: inputLayer (str). Required parameter. Feature layer to enrich with new data
data_collections: dataCollections (str). Optional parameter. Data collections you wish to add to your features.
analysis_variables: analysisVariables (str). Optional parameter. A subset of specific variables instead of dataCollections.
country: country (str). Optional parameter. The two character country code that specifies the country of the input features. Eg. US (United States), FR (France), GB (United Kingdom) etc.
buffer_type: bufferType (str). Optional parameter. Area to be created around the point or line features for enrichment. Default is 1 Mile straight-line buffer radius.
distance: distance (float). Optional parameter. A double value that defines the straight-line distance or time (when drivingTime is used).
units: units (str). Optional parameter. The unit (eg. Miles, Minutes) to be used with the distance value(s) specified in the distance parameter to calculate the area.
Choice list:['Feet', 'Yards', 'Miles', 'Meters', 'Kilometers', 'Seconds', 'Minutes', 'Hours']
return_boundaries: returnBoundaries (bool). Optional parameter. Applicable only for point and line input features. If true, will return a result layer of areas. The resulting areas are defined by the specified bufferType. For example, if using a StraightLine of 5 miles, your result will contain areas with a 5 mile radius around the input features and requested enrichment variables. If false, the resulting layer will return the same features as the input layer with geoenrichment variables.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
enriched_layer - enrichedLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/EnrichLayer.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"data_collections": (str, "dataCollections"),
"analysis_variables": (str, "analysisVariables"),
"country": (str, "country"),
"buffer_type": (str, "bufferType"),
"distance": (float, "distance"),
"units": (str, "units"),
"return_boundaries": (bool, "returnBoundaries"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"enriched_layer": (str, "enrichedLayer"),
}
return_values = [
{"name": "enriched_layer", "display_name": "enrichedLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "EnrichLayer", kwargs, param_db, return_values, _use_async, url)
def overlay_layers(input_layer: str = None,
overlay_layer: str = None,
overlay_type: str = """Intersect""",
snap_to_input: bool = False,
output_type: str = """Input""",
tolerance: float = None,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Overlays the input layer with the overlay layer. Overlay operations supported are Intersect, Union, and Erase.
Parameters:
input_layer: inputLayer (str). Required parameter. The input analysis layer.
overlay_layer: overlayLayer (str). Required parameter. The layer to be overlaid with the analysis layer.
overlay_type: overlayType (str). Optional parameter. The overlay type (INTERSECT, UNION, or ERASE) defines how the analysis layer and the overlay layer are combined.
Choice list:['Intersect', 'Union', 'Erase']
snap_to_input: snapToInput (bool). Optional parameter. When the distance between features is less than the tolerance, the features in the overlay layer will snap to the features in the input layer.
output_type: outputType (str). Optional parameter. The type of intersection (INPUT, LINE, POINT).
Choice list:['Input', 'Point', 'Line']
tolerance: tolerance (float). Optional parameter. The minimum distance separating all feature coordinates (nodes and vertices) as well as the distance a coordinate can move in X or Y (or both).
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
output_layer - outputLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/OverlayLayers.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"overlay_layer": (str, "overlayLayer"),
"overlay_type": (str, "overlayType"),
"snap_to_input": (bool, "snapToInput"),
"output_type": (str, "outputType"),
"tolerance": (float, "tolerance"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"output_layer": (str, "outputLayer"),
}
return_values = [
{"name": "output_layer", "display_name": "outputLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "OverlayLayers", kwargs, param_db, return_values, _use_async, url)
def extract_data(input_layers: str = """[]""",
extent: str = None,
clip: bool = False,
data_format: str = None,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Select and download data for a specified area of interest. Layers that you select will be added to a zip file or layer package.
Parameters:
input_layers: inputLayers (str). Required parameter. The layers from which you can extract features.
extent: extent (str). Optional parameter. The area that defines which features will be included in the output zip file or layer package.
clip: clip (bool). Optional parameter. Select features that intersect the extent or clip features within the extent.
data_format: dataFormat (str). Optional parameter. Format of the data that will be extracted and downloaded. Layer packages will always include file geodatabases.
Choice list:['FileGeodatabase', 'ShapeFile', 'KML', 'CSV']
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
content_id - contentID as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/ExtractData.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layers": (str, "inputLayers"),
"extent": (str, "extent"),
"clip": (bool, "clip"),
"data_format": (str, "dataFormat"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"content_id": (str, "contentID"),
}
return_values = [
{"name": "content_id", "display_name": "contentID", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "ExtractData", kwargs, param_db, return_values, _use_async, url)
def find_existing_locations(input_layers: str = """[]""",
expressions: str = None,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
The Find Existing Locations task selects features in the input layer that meet a query you specify. A query is made up of one or more expressions. There are two types of expressions: attribute and spatial. An example of an attribute expression is that a parcel must be vacant, which is an attribute of the Parcels layer (where STATUS = 'VACANT'). An example of a spatial expression is that the parcel must also be within a certain distance of a river (Parcels within a distance of 0.75 Miles from Rivers).
Parameters:
input_layers: inputLayers (str). Required parameter. A list of layers that will be used in the expressions parameter.
expressions: expressions (str). Required parameter. Specify a list of expressions. Please refer to the documentation at http://developers.arcgis.com for more information on creating expressions.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
result_layer - resultLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/FindExistingLocations.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layers": (str, "inputLayers"),
"expressions": (str, "expressions"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "FindExistingLocations", kwargs, param_db, return_values, _use_async, url)
def derive_new_locations(input_layers: str = """[]""",
expressions: str = None,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
The Derive New Locations task derives new features from the input layers that meet a query you specify. A query is made up of one or more expressions. There are two types of expressions: attribute and spatial. An example of an attribute expression is that a parcel must be vacant, which is an attribute of the Parcels layer (where STATUS = 'VACANT'). An example of a spatial expression is that the parcel must also be within a certain distance of a river (Parcels within a distance of 0.75 Miles from Rivers). The Derive New Locations task is very similar to the Find Existing Locations task; the main difference is that the result of Derive New Locations can contain partial features. In both tasks, the attribute expression where and the spatial relationships within and contains return the same result. This is because these relationships return entire features. When intersects or withinDistance is used, Derive New Locations creates new features in the result. For example, when intersecting a parcel feature and a flood zone area that partially overlap each other, Find Existing Locations will return the entire parcel whereas Derive New Locations will return just the portion of the parcel that is within the flood zone.
Parameters:
input_layers: inputLayers (str). Required parameter. A list of layers that will be used in the expressions parameter.
expressions: expressions (str). Required parameter. Specify a list of expressions. Please refer to the documentation at http://developers.arcgis.com for more information on expressions.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
result_layer - resultLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/DeriveNewLocations.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layers": (str, "inputLayers"),
"expressions": (str, "expressions"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "DeriveNewLocations", kwargs, param_db, return_values, _use_async, url)
def field_calculator(input_layer: str = None,
expressions: str = None,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Calculates existing fields or creates and calculates new fields.
Parameters:
input_layer: inputLayer (str). Required parameter.
expressions: expressions (str). Required parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
result_layer - resultLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/FieldCalculator.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"expressions": (str, "expressions"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "FieldCalculator", kwargs, param_db, return_values, _use_async, url)
def interpolate_points(input_layer: str = None,
field: str = None,
interpolate_option: str = """5""",
output_prediction_error: bool = False,
classification_type: str = """GeometricInterval""",
num_classes: int = 10,
class_breaks: str = """[]""",
bounding_polygon_layer: str = None,
predict_at_point_layer: str = None,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
The Interpolate Points task allows you to predict values at new locations based on measurements from a collection of points. The task takes point data with values at each point and returns areas classified by predicted values.
Parameters:
input_layer: inputLayer (str). Required parameter. The point layer whose features will be interpolated.
field: field (str). Required parameter. Name of the numeric field containing the values you wish to interpolate.
interpolate_option: interpolateOption (str). Optional parameter. Integer value declaring your preference for speed versus accuracy, from 1 (fastest) to 9 (most accurate). More accurate predictions take longer to calculate.
Choice list:['1', '5', '9']
output_prediction_error: outputPredictionError (bool). Optional parameter. If True, a polygon layer of standard errors for the interpolation predictions will be returned in the predictionError output parameter.
classification_type: classificationType (str). Optional parameter. Determines how predicted values will be classified into areas.
Choice list:['EqualArea', 'EqualInterval', 'GeometricInterval', 'Manual']
num_classes: numClasses (int). Optional parameter. This value is used to divide the range of interpolated values into distinct classes. The range of values in each class is determined by the classificationType parameter. Each class defines the boundaries of the result polygons.
class_breaks: classBreaks (str). Optional parameter. If classificationType is Manual, supply desired class break values separated by spaces. These values define the upper limit of each class, so the number of classes will equal the number of entered values. Areas will not be created for any locations with predicted values above the largest entered break value. You must enter at least two values and no more than 32.
bounding_polygon_layer: boundingPolygonLayer (str). Optional parameter. A layer specifying the polygon(s) where you want values to be interpolated.
predict_at_point_layer: predictAtPointLayer (str). Optional parameter. An optional layer specifying point locations to calculate prediction values. This allows you to make predictions at specific locations of interest.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
result_layer - resultLayer as a str
prediction_error - predictionError as a str
predicted_point_layer - predictedPointLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/InterpolatePoints.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"field": (str, "field"),
"interpolate_option": (str, "interpolateOption"),
"output_prediction_error": (bool, "outputPredictionError"),
"classification_type": (str, "classificationType"),
"num_classes": (int, "numClasses"),
"class_breaks": (str, "classBreaks"),
"bounding_polygon_layer": (str, "boundingPolygonLayer"),
"predict_at_point_layer": (str, "predictAtPointLayer"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
"prediction_error": (str, "predictionError"),
"predicted_point_layer": (str, "predictedPointLayer"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
{"name": "prediction_error", "display_name": "predictionError", "type": str},
{"name": "predicted_point_layer", "display_name": "predictedPointLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "InterpolatePoints", kwargs, param_db, return_values, _use_async, url)
def calculate_density(input_layer: str = None,
field: str = None,
cell_size: float = None,
cell_size_units: str = """Meters""",
radius: float = None,
radius_units: str = None,
bounding_polygon_layer: str = None,
area_units: str = None,
classification_type: str = """EqualInterval""",
num_classes: int = 10,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
The Calculate Density task creates a density map from point or line features by spreading known quantities of some phenomenon (represented as attributes of the points or lines) across the map. The result is a layer of areas classified from least dense to most dense.
Parameters:
input_layer: inputLayer (str). Required parameter. The point or line features from which to calculate density.
field: field (str). Optional parameter. A numeric field name specifying the number of incidents at each location. If not specified, each location will be assumed to represent a single count.
cell_size: cellSize (float). Optional parameter. This value is used to create a mesh of points where density values are calculated. The default is approximately 1/1000th of the smaller of the width and height of the analysis extent as defined in the context parameter.
cell_size_units: cellSizeUnits (str). Optional parameter. The units of the cellSize value
Choice list:['Meters', 'Kilometers', 'Feet', 'Miles']
radius: radius (float). Optional parameter. A distance specifying how far to search to find point or line features when calculating density values.
radius_units: radiusUnits (str). Optional parameter. The units of the radius parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Miles']
bounding_polygon_layer: boundingPolygonLayer (str). Optional parameter. A layer specifying the polygon(s) where you want densities to be calculated.
area_units: areaUnits (str). Optional parameter. The units of the calculated density values.
Choice list:['SquareKilometers', 'SquareMiles']
classification_type: classificationType (str). Optional parameter. Determines how density values will be classified into polygons.
Choice list:['EqualArea', 'EqualInterval', 'GeometricInterval', 'NaturalBreaks', 'StandardDeviation']
num_classes: numClasses (int). Optional parameter. This value is used to divide the range of predicted values into distinct classes. The range of values in each class is determined by the classificationType parameter.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
result_layer - resultLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/CalculateDensity.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"field": (str, "field"),
"cell_size": (float, "cellSize"),
"cell_size_units": (str, "cellSizeUnits"),
"radius": (float, "radius"),
"radius_units": (str, "radiusUnits"),
"bounding_polygon_layer": (str, "boundingPolygonLayer"),
"area_units": (str, "areaUnits"),
"classification_type": (str, "classificationType"),
"num_classes": (int, "numClasses"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "CalculateDensity", kwargs, param_db, return_values, _use_async, url)
def summarize_nearby(sum_nearby_layer: str = None,
summary_layer: str = None,
near_type: str = """StraightLine""",
distances: str = """[]""",
units: str = """Meters""",
time_of_day: datetime = None,
time_zone_for_time_of_day: str = """GeoLocal""",
return_boundaries: bool = True,
sum_shape: bool = True,
shape_units: str = None,
summary_fields: str = """[]""",
group_by_field: str = None,
minority_majority: bool = False,
percent_shape: bool = False,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
The SummarizeNearby task finds features that are within a specified distance of features in the input layer. Distance can be measured as a straight-line distance, a drive-time distance (for example, within 10 minutes), or a drive distance (within 5 kilometers). Statistics are then calculated for the nearby features. For example: Calculate the total population within five minutes of driving time of a proposed new store location. Calculate the number of freeway access ramps within a one-mile driving distance of a proposed new store location to use as a measure of store accessibility.
Parameters:
sum_nearby_layer: sumNearbyLayer (str). Required parameter. Point, line, or polygon features from which distances will be measured to features in the summarizeLayer.
summary_layer: summaryLayer (str). Required parameter. Point, line, or polygon features. Features in this layer that are within the specified distance to features in the sumNearbyLayer will be summarized.
near_type: nearType (str). Optional parameter. Defines what kind of distance measurement you want to use to create areas around the nearbyLayer features.
distances: distances (str). Required parameter. An array of double values that defines the search distance for creating areas mentioned above
units: units (str). Optional parameter. The unit for the distances parameter above, e.g. Miles, Kilometers, Minutes, Seconds.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles', 'Seconds', 'Minutes', 'Hours']
time_of_day: timeOfDay (datetime). Optional parameter. For timeOfDay, set the time and day according to the number of milliseconds elapsed since the Unix epoch (January 1, 1970 UTC). When specified and if relevant for the nearType parameter, the traffic conditions during the time of the day will be considered.
time_zone_for_time_of_day: timeZoneForTimeOfDay (str). Optional parameter. Determines if the value specified for timeOfDay is specified in UTC or in a time zone that is local to the location of the origins.
Choice list:['UTC', 'GeoLocal']
return_boundaries: returnBoundaries (bool). Optional parameter. If true, will return a result layer of areas that contain the requested summary information. The resulting areas are defined by the specified nearType. For example, if using a StraightLine of 5 miles, your result will contain areas with a 5 mile radius around the input features and specified summary information. If false, the resulting layer will return the same features as the input analysis layer with requested summary information.
sum_shape: sumShape (bool). Optional parameter. A boolean value that instructs the task to calculate the count of points, length of lines, or area of polygons of the summaryLayer within each area generated around the sumNearbyLayer features.
shape_units: shapeUnits (str). Optional parameter. Specify units to summarize the length or areas when sumShape is set to true. Units is not required to summarize points.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles', 'Acres', 'Hectares', 'SquareMeters', 'SquareKilometers', 'SquareFeet', 'SquareYards', 'SquareMiles']
summary_fields: summaryFields (str). Optional parameter. A list of field names and statistical summary types that you wish to calculate for all features in the summaryLayer that are within each area generated around the sumNearbyLayer features, e.g. ["fieldname1 summary", "fieldname2 summary"]
group_by_field: groupByField (str). Optional parameter. Specify a field from the summaryLayer features to calculate statistics separately for each unique value of the field.
minority_majority: minorityMajority (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If true, the minority (least dominant) or the majority (most dominant) attribute values within each group, within each boundary will be calculated.
percent_shape: percentShape (bool). Optional parameter. This boolean parameter is applicable only when a groupByField is specified. If set to true, the percentage of shape (eg. length for lines) for each unique groupByField value is calculated.
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
result_layer - resultLayer as a str
group_by_summary - groupBySummary as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/SummarizeNearby.htm for additional help.
"""
kwargs = locals()
param_db = {
"sum_nearby_layer": (str, "sumNearbyLayer"),
"summary_layer": (str, "summaryLayer"),
"near_type": (str, "nearType"),
"distances": (str, "distances"),
"units": (str, "units"),
"time_of_day": (datetime, "timeOfDay"),
"time_zone_for_time_of_day": (str, "timeZoneForTimeOfDay"),
"return_boundaries": (bool, "returnBoundaries"),
"sum_shape": (bool, "sumShape"),
"shape_units": (str, "shapeUnits"),
"summary_fields": (str, "summaryFields"),
"group_by_field": (str, "groupByField"),
"minority_majority": (bool, "minorityMajority"),
"percent_shape": (bool, "percentShape"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"result_layer": (str, "resultLayer"),
"group_by_summary": (str, "groupBySummary"),
}
return_values = [
{"name": "result_layer", "display_name": "resultLayer", "type": str},
{"name": "group_by_summary", "display_name": "groupBySummary", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "SummarizeNearby", kwargs, param_db, return_values, _use_async, url)
def create_viewshed(input_layer: str = None,
dem_resolution: str = """Finest""",
maximum_distance: float = None,
max_distance_units: str = """Meters""",
observer_height: float = None,
observer_height_units: str = """Meters""",
target_height: float = None,
target_height_units: str = """Meters""",
generalize: bool = True,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Creates areas that are visible from the input observer locations, based on surface elevation data.
Parameters:
input_layer: inputLayer (str). Required parameter.
dem_resolution: demResolution (str). Optional parameter.
Choice list:['Finest', '10m', '30m', '90m']
maximum_distance: maximumDistance (float). Optional parameter.
max_distance_units: maxDistanceUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
observer_height: observerHeight (float). Optional parameter.
observer_height_units: observerHeightUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
target_height: targetHeight (float). Optional parameter.
target_height_units: targetHeightUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
generalize: generalize (bool). Optional parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
viewshed_layer - viewshedLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/CreateViewshed.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"dem_resolution": (str, "demResolution"),
"maximum_distance": (float, "maximumDistance"),
"max_distance_units": (str, "maxDistanceUnits"),
"observer_height": (float, "observerHeight"),
"observer_height_units": (str, "observerHeightUnits"),
"target_height": (float, "targetHeight"),
"target_height_units": (str, "targetHeightUnits"),
"generalize": (bool, "generalize"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"viewshed_layer": (str, "viewshedLayer"),
}
return_values = [
{"name": "viewshed_layer", "display_name": "viewshedLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "CreateViewshed", kwargs, param_db, return_values, _use_async, url)
def find_similar_locations(input_layer: str = None,
search_layer: str = None,
analysis_fields: str = """[]""",
input_query: str = None,
number_of_results: int = 0,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
Measures the similarity of candidate locations in the search layer to one or more reference locations, based on the specified analysis fields.
Parameters:
input_layer: inputLayer (str). Required parameter.
search_layer: searchLayer (str). Required parameter.
analysis_fields: analysisFields (str). Required parameter.
input_query: inputQuery (str). Optional parameter.
number_of_results: numberOfResults (int). Optional parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
similar_result_layer - similarResultLayer as a str
process_info - processInfo as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/FindSimilarLocations.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"search_layer": (str, "searchLayer"),
"analysis_fields": (str, "analysisFields"),
"input_query": (str, "inputQuery"),
"number_of_results": (int, "numberOfResults"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"similar_result_layer": (str, "similarResultLayer"),
"process_info": (str, "processInfo"),
}
return_values = [
{"name": "similar_result_layer", "display_name": "similarResultLayer", "type": str},
{"name": "process_info", "display_name": "processInfo", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "FindSimilarLocations", kwargs, param_db, return_values, _use_async, url)
def create_watersheds(input_layer: str = None,
search_distance: float = None,
search_units: str = """Meters""",
source_database: str = """FINEST""",
generalize: bool = True,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
Creates the upstream contributing area (watershed) for each input point.
Parameters:
input_layer: inputLayer (str). Required parameter.
search_distance: searchDistance (float). Optional parameter.
search_units: searchUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
source_database: sourceDatabase (str). Optional parameter.
Choice list:['FINEST', '30m', '90m']
generalize: generalize (bool). Optional parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
snap_pour_pts_layer - snapPourPtsLayer as a str
watershed_layer - watershedLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/CreateWatersheds.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"search_distance": (float, "searchDistance"),
"search_units": (str, "searchUnits"),
"source_database": (str, "sourceDatabase"),
"generalize": (bool, "generalize"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"snap_pour_pts_layer": (str, "snapPourPtsLayer"),
"watershed_layer": (str, "watershedLayer"),
}
return_values = [
{"name": "snap_pour_pts_layer", "display_name": "snapPourPtsLayer", "type": str},
{"name": "watershed_layer", "display_name": "watershedLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "CreateWatersheds", kwargs, param_db, return_values, _use_async, url)
def find_nearest(analysis_layer: str = None,
near_layer: str = None,
measurement_type: str = """StraightLine""",
max_count: int = 100,
search_cutoff: float = 2147483647,
search_cutoff_units: str = None,
time_of_day: datetime = None,
time_zone_for_time_of_day: str = """GeoLocal""",
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
Measures the straight-line distance, driving distance, or driving time from features in the analysis layer to features in the near layer, and copies the nearest features in the near layer to a new layer. Returns a layer containing the nearest features and a line layer that links the start locations to their nearest locations.
Parameters:
analysis_layer: analysisLayer (str). Required parameter. For each feature in this layer, the task finds the nearest features from the nearLayer.
near_layer: nearLayer (str). Required parameter. The features from which the nearest locations are found.
measurement_type: measurementType (str). Required parameter. The nearest locations can be determined by measuring straight-line distance, driving distance, or driving time
max_count: maxCount (int). Optional parameter. The maximum number of near locations to find for each feature in analysisLayer.
search_cutoff: searchCutoff (float). Optional parameter. Limits the search range to this value
search_cutoff_units: searchCutoffUnits (str). Optional parameter. The units for the value specified as searchCutoff
Choice list:['Miles', 'Yards', 'Feet', 'Meters', 'Kilometers', 'NauticalMiles']
time_of_day: timeOfDay (datetime). Optional parameter. When measurementType is DrivingTime, this value specifies the time of day to be used for driving time calculations based on traffic.
time_zone_for_time_of_day: timeZoneForTimeOfDay (str). Optional parameter.
Choice list:['UTC', 'GeoLocal']
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
nearest_layer - nearestLayer as a str
connecting_lines_layer - connectingLinesLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/FindNearest.htm for additional help.
"""
kwargs = locals()
param_db = {
"analysis_layer": (str, "analysisLayer"),
"near_layer": (str, "nearLayer"),
"measurement_type": (str, "measurementType"),
"max_count": (int, "maxCount"),
"search_cutoff": (float, "searchCutoff"),
"search_cutoff_units": (str, "searchCutoffUnits"),
"time_of_day": (datetime, "timeOfDay"),
"time_zone_for_time_of_day": (str, "timeZoneForTimeOfDay"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"nearest_layer": (str, "nearestLayer"),
"connecting_lines_layer": (str, "connectingLinesLayer"),
}
return_values = [
{"name": "nearest_layer", "display_name": "nearestLayer", "type": str},
{"name": "connecting_lines_layer", "display_name": "connectingLinesLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "FindNearest", kwargs, param_db, return_values, _use_async, url)
def plan_routes(stops_layer: str = None,
route_count: int = None,
max_stops_per_route: int = None,
route_start_time: datetime = None,
start_layer: str = None,
start_layer_route_id_field: str = None,
return_to_start: bool = True,
end_layer: str = None,
end_layer_route_id_field: str = None,
travel_mode: str = """Driving""",
stop_service_time: float = 0,
max_route_time: float = 525600,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
Assigns stops to a fleet of vehicles and determines an efficient route for each vehicle through its assigned stops.
Parameters:
stops_layer: stopsLayer (str). Required parameter.
route_count: routeCount (int). Required parameter.
max_stops_per_route: maxStopsPerRoute (int). Required parameter.
route_start_time: routeStartTime (datetime). Required parameter.
start_layer: startLayer (str). Required parameter.
start_layer_route_id_field: startLayerRouteIDField (str). Optional parameter.
return_to_start: returnToStart (bool). Optional parameter.
end_layer: endLayer (str). Optional parameter.
end_layer_route_id_field: endLayerRouteIDField (str). Optional parameter.
travel_mode: travelMode (str). Optional parameter.
stop_service_time: stopServiceTime (float). Optional parameter.
max_route_time: maxRouteTime (float). Optional parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
routes_layer - routesLayer as a str
assigned_stops_layer - assignedStopsLayer as a str
unassigned_stops_layer - unassignedStopsLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/PlanRoutes.htm for additional help.
"""
kwargs = locals()
param_db = {
"stops_layer": (str, "stopsLayer"),
"route_count": (int, "routeCount"),
"max_stops_per_route": (int, "maxStopsPerRoute"),
"route_start_time": (datetime, "routeStartTime"),
"start_layer": (str, "startLayer"),
"start_layer_route_id_field": (str, "startLayerRouteIDField"),
"return_to_start": (bool, "returnToStart"),
"end_layer": (str, "endLayer"),
"end_layer_route_id_field": (str, "endLayerRouteIDField"),
"travel_mode": (str, "travelMode"),
"stop_service_time": (float, "stopServiceTime"),
"max_route_time": (float, "maxRouteTime"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"routes_layer": (str, "routesLayer"),
"assigned_stops_layer": (str, "assignedStopsLayer"),
"unassigned_stops_layer": (str, "unassignedStopsLayer"),
}
return_values = [
{"name": "routes_layer", "display_name": "routesLayer", "type": str},
{"name": "assigned_stops_layer", "display_name": "assignedStopsLayer", "type": str},
{"name": "unassigned_stops_layer", "display_name": "unassignedStopsLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "PlanRoutes", kwargs, param_db, return_values, _use_async, url)
def trace_downstream(input_layer: str = None,
split_distance: float = None,
split_units: str = """Kilometers""",
max_distance: float = None,
max_distance_units: str = """Kilometers""",
bounding_polygon_layer: str = None,
source_database: str = None,
generalize: bool = True,
output_name: str = None,
context: str = None,
gis=None) -> str:
"""
Traces the path that water will take flowing downstream from the input locations.
Parameters:
input_layer: inputLayer (str). Required parameter.
split_distance: splitDistance (float). Optional parameter.
split_units: splitUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
max_distance: maxDistance (float). Optional parameter.
max_distance_units: maxDistanceUnits (str). Optional parameter.
Choice list:['Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
bounding_polygon_layer: boundingPolygonLayer (str). Optional parameter.
source_database: sourceDatabase (str). Optional parameter.
generalize: generalize (bool). Optional parameter.
output_name: outputName (str). Optional parameter.
context: context (str). Optional parameter.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
trace_layer - traceLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/TraceDownstream.htm for additional help.
"""
kwargs = locals()
param_db = {
"input_layer": (str, "inputLayer"),
"split_distance": (float, "splitDistance"),
"split_units": (str, "splitUnits"),
"max_distance": (float, "maxDistance"),
"max_distance_units": (str, "maxDistanceUnits"),
"bounding_polygon_layer": (str, "boundingPolygonLayer"),
"source_database": (str, "sourceDatabase"),
"generalize": (bool, "generalize"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"trace_layer": (str, "traceLayer"),
}
return_values = [
{"name": "trace_layer", "display_name": "traceLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "TraceDownstream", kwargs, param_db, return_values, _use_async, url)
def connect_origins_to_destinations(origins_layer = None,
destinations_layer = None,
measurement_type: str = """DrivingTime""",
origins_layer_route_id_field: str = None,
destinations_layer_route_id_field: str = None,
time_of_day: datetime = None,
time_zone_for_time_of_day: str = """GeoLocal""",
output_name: str = None,
context: str = None,
gis=None,
point_barrier_layer = None,
line_barrier_layer = None,
polygon_barrier_layer = None) -> tuple:
"""
Calculates routes between pairs of points.
Parameters:
origins_layer: originsLayer (str). Required parameter. The routes start from points in the origins layer.
destinations_layer: destinationsLayer (str). Required parameter. The routes end at points in the destinations layer.
measurement_type: measurementType (str). Required parameter. The routes can be determined by measuring travel distance or travel time along street network using different travel modes or by measuring straight line distance.
origins_layer_route_id_field: originsLayerRouteIDField (str). Optional parameter. The field in the origins layer containing the IDs that are used to match an origin with a destination.
destinations_layer_route_id_field: destinationsLayerRouteIDField (str). Optional parameter. The field in the destinations layer containing the IDs that are used to match an origin with a destination.
time_of_day: timeOfDay (datetime). Optional parameter. When measurementType is DrivingTime, this value specifies the time of day to be used for driving time calculations based on traffic. The WalkingTime and TruckingTime measurement types do not support calculations based on traffic.
time_zone_for_time_of_day: timeZoneForTimeOfDay (str). Optional parameter. Determines if the value specified for timeOfDay is specified in UTC or in a time zone that is local to the location of the origins.
Choice list:['GeoLocal', 'UTC']
output_name: outputName (str). Optional parameter. Additional properties such as output feature service name.
context: context (str). Optional parameter. Additional settings such as processing extent and output spatial reference.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
routes_layer - routesLayer as a str
unassigned_origins_layer - unassignedOriginsLayer as a str
unassigned_destinations_layer - unassignedDestinationsLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/ConnectOriginsToDestinations.htm for additional help.
"""
kwargs = locals()
param_db = {
"origins_layer": (str, "originsLayer"),
"destinations_layer": (str, "destinationsLayer"),
"measurement_type": (str, "measurementType"),
"origins_layer_route_id_field": (str, "originsLayerRouteIDField"),
"destinations_layer_route_id_field": (str, "destinationsLayerRouteIDField"),
"time_of_day": (datetime, "timeOfDay"),
"time_zone_for_time_of_day": (str, "timeZoneForTimeOfDay"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"point_barrier_layer": (str, "pointBarrierLayer"),
"line_barrier_layer": (str, "lineBarrierLayer"),
"polygon_barrier_layer": (str, "polygonBarrierLayer"),
"routes_layer": (str, "routesLayer"),
"unassigned_origins_layer": (str, "unassignedOriginsLayer"),
"unassigned_destinations_layer": (str, "unassignedDestinationsLayer"),
}
return_values = [
{"name": "routes_layer", "display_name": "routesLayer", "type": str},
{"name": "unassigned_origins_layer", "display_name": "unassignedOriginsLayer", "type": str},
{"name": "unassigned_destinations_layer", "display_name": "unassignedDestinationsLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "ConnectOriginsToDestinations", kwargs, param_db, return_values, _use_async, url)
def choose_best_facilities(goal: str = """Allocate""",
demand_locations_layer: str = None,
demand: float = 1,
demand_field: str = None,
max_travel_range: float = 2147483647,
max_travel_range_field: str = None,
max_travel_range_units: str = """Minutes""",
travel_mode: str = None,
time_of_day: datetime = None,
time_zone_for_time_of_day: str = """GeoLocal""",
travel_direction: str = """FacilityToDemand""",
required_facilities_layer: str = None,
required_facilities_capacity: float = 2147483647,
required_facilities_capacity_field: str = None,
candidate_facilities_layer: str = None,
candidate_count: int = 1,
candidate_facilities_capacity: float = 2147483647,
candidate_facilities_capacity_field: str = None,
percent_demand_coverage: float = 100,
output_name: str = None,
context: str = None,
gis=None) -> tuple:
"""
This tool chooses the best locations for facilities by allocating locations that have demand for these facilities in a way that satisfies a given goal.
Parameters:
goal: goal (str). Required parameter. Specify the goal that must be satisfied when allocating demand locations to facilities.
Choice list:['Allocate', 'MinimizeImpedance', 'MaximizeCoverage', 'MaximizeCapacitatedCoverage', 'PercentCoverage']
    demand_locations_layer: demandLocationsLayer (str). Required parameter. A point layer specifying the locations that have demand for facilities.
    demand: demand (float). Optional parameter. The amount of demand available at every demand location.
demand_field: demandField (str). Optional parameter. The field on the demandLocationsLayer representing the amount of demand available at each demand location.
max_travel_range: maxTravelRange (float). Optional parameter. Specify the maximum travel time or distance allowed between a demand location and the facility it is allocated to
max_travel_range_field: maxTravelRangeField (str). Optional parameter. The field on the demandLocationsLayer specifying the maximum travel time or distance allowed between a demand location and the facility it is allocated to. This parameter takes precedence when maxTravelRange parameter is also specified
max_travel_range_units: maxTravelRangeUnits (str). Optional parameter. The units for the maximum travel time or distance allowed between a demand location and the facility it is allocated to.
Choice list:['Seconds', 'Minutes', 'Hours', 'Days', 'Meters', 'Kilometers', 'Feet', 'Yards', 'Miles']
travel_mode: travelMode (str). Optional parameter. Specify the mode of transportation for the analysis
time_of_day: timeOfDay (datetime). Optional parameter. Specify whether travel times should consider traffic conditions
time_zone_for_time_of_day: timeZoneForTimeOfDay (str). Optional parameter. Specify the time zone or zones for the timeOfDay parameter.
Choice list:['GeoLocal', 'UTC']
travel_direction: travelDirection (str). Optional parameter. Specify whether to measure travel times or distances from facilities to demand locations or from demand locations to facilities.
Choice list:['FacilityToDemand', 'DemandToFacility']
required_facilities_layer: requiredFacilitiesLayer (str). Optional parameter. A point layer specifying one or more locations that act as facilities by providing some kind of service. Facilities specified by this parameter are required to be part of the output solution and will be used before any facilities from the candidatesFacilitiesLayer when allocating demand locations.
required_facilities_capacity: requiredFacilitiesCapacity (float). Optional parameter. Specify how much demand every facility in the requiredFacilitiesLayer is capable of supplying.
required_facilities_capacity_field: requiredFacilitiesCapacityField (str). Optional parameter. A field on the requiredFacilitiesLayer representing how much demand each facility in the requiredFacilitiesLayer is capable of supplying. This parameter takes precedence when requiredFacilitiesCapacity parameter is also specified.
    candidate_facilities_layer: candidateFacilitiesLayer (str). Optional parameter. A point layer specifying one or more locations that act as facilities by providing some kind of service. Facilities specified by this parameter are not required to be part of the output solution and will be used only after all the facilities from the requiredFacilitiesLayer have been used when allocating demand locations.
candidate_count: candidateCount (int). Optional parameter. Specify the number of facilities to choose when allocating demand locations. If requiredFacilitiesLayer is specified, the number of facilities to choose should be equal to or greater than the count of locations in the requiredFacilitiesLayer.
candidate_facilities_capacity: candidateFacilitiesCapacity (float). Optional parameter. Specify how much demand every facility in the candidateFacilitiesLayer is capable of supplying.
    candidate_facilities_capacity_field: candidateFacilitiesCapacityField (str). Optional parameter. A field on the candidateFacilitiesLayer representing how much demand each facility in the candidateFacilitiesLayer is capable of supplying. This parameter takes precedence when candidateFacilitiesCapacity parameter is also specified.
percent_demand_coverage: percentDemandCoverage (float). Optional parameter. Specify the percentage of the total demand that you want the chosen and required facilities to capture.
output_name: outputName (str). Optional parameter. If provided, the task will create a feature service of the results. You define the name of the service. If outputName is not supplied, the task will return a feature collection.
context: context (str). Optional parameter. Context contains additional settings that affect task execution such as the extent of inputs.
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns the following as a named tuple:
allocated_demand_locations_layer - allocatedDemandLocationsLayer as a str
allocation_lines_layer - allocationLinesLayer as a str
assigned_facilities_layer - assignedFacilitiesLayer as a str
See http://analysis6.arcgis.com:80/arcgis/rest/directories/arcgisoutput/tasks_GPServer/tasks/ChooseBestFacilities.htm for additional help.
"""
kwargs = locals()
param_db = {
"goal": (str, "goal"),
"demand_locations_layer": (str, "demandLocationsLayer"),
"demand": (float, "demand"),
"demand_field": (str, "demandField"),
"max_travel_range": (float, "maxTravelRange"),
"max_travel_range_field": (str, "maxTravelRangeField"),
"max_travel_range_units": (str, "maxTravelRangeUnits"),
"travel_mode": (str, "travelMode"),
"time_of_day": (datetime, "timeOfDay"),
"time_zone_for_time_of_day": (str, "timeZoneForTimeOfDay"),
"travel_direction": (str, "travelDirection"),
"required_facilities_layer": (str, "requiredFacilitiesLayer"),
"required_facilities_capacity": (float, "requiredFacilitiesCapacity"),
"required_facilities_capacity_field": (str, "requiredFacilitiesCapacityField"),
"candidate_facilities_layer": (str, "candidateFacilitiesLayer"),
"candidate_count": (int, "candidateCount"),
"candidate_facilities_capacity": (float, "candidateFacilitiesCapacity"),
"candidate_facilities_capacity_field": (str, "candidateFacilitiesCapacityField"),
"percent_demand_coverage": (float, "percentDemandCoverage"),
"output_name": (str, "outputName"),
"context": (str, "context"),
"allocated_demand_locations_layer": (str, "allocatedDemandLocationsLayer"),
"allocation_lines_layer": (str, "allocationLinesLayer"),
"assigned_facilities_layer": (str, "assignedFacilitiesLayer"),
}
return_values = [
{"name": "allocated_demand_locations_layer", "display_name": "allocatedDemandLocationsLayer", "type": str},
{"name": "allocation_lines_layer", "display_name": "allocationLinesLayer", "type": str},
{"name": "assigned_facilities_layer", "display_name": "assignedFacilitiesLayer", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.analysis.url
return _execute_gp_tool(gis, "ChooseBestFacilities", kwargs, param_db, return_values, _use_async, url)
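# --- Usage sketch (added; layer variables and parameter values are hypothetical) ---
# result = choose_best_facilities(
#     goal="MaximizeCoverage",
#     demand_locations_layer=census_blocks,  # hypothetical point layer
#     candidate_facilities_layer=sites,      # hypothetical point layer
#     candidate_count=3,
#     max_travel_range=10,
#     max_travel_range_units="Minutes")
# result.assigned_facilities_layer           # named-tuple field, see docstring above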
|
[
"[email protected]"
] | |
7cc28369b53704bdce93420c3d80c2239a0c75b0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03732/s203879619.py
|
5aea1f8b02c534b21ae12dcaf3b7a37028bead41
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,955 |
py
|
#!/usr/bin/env python3
import sys
# import time
# import math
# import numpy as np
# import scipy.sparse.csgraph as cs # csgraph_from_dense(ndarray, null_value=inf), bellman_ford(G, return_predecessors=True), dijkstra, floyd_warshall
# import random # random, uniform, randint, randrange, shuffle, sample
# import string # ascii_lowercase, ascii_uppercase, ascii_letters, digits, hexdigits
# import re # re.compile(pattern) => ptn obj; p.search(s), p.match(s), p.finditer(s) => match obj; p.sub(after, s)
# from bisect import bisect_left, bisect_right # bisect_left(a, x, lo=0, hi=len(a)) returns i such that all(val<x for val in a[lo:i]) and all(val>=x for val in a[i:hi]).
# from collections import deque # deque class. deque(L): dq.append(x), dq.appendleft(x), dq.pop(), dq.popleft(), dq.rotate()
# from collections import defaultdict # subclass of dict. defaultdict(facroty)
# from collections import Counter # subclass of dict. Counter(iter): c.elements(), c.most_common(n), c.subtract(iter)
# from datetime import date, datetime # date.today(), date(year,month,day) => date obj; datetime.now(), datetime(year,month,day,hour,second,microsecond) => datetime obj; subtraction => timedelta obj
# from datetime.datetime import strptime # strptime('2019/01/01 10:05:20', '%Y/%m/%d/ %H:%M:%S') returns datetime obj
# from datetime import timedelta # td.days, td.seconds, td.microseconds, td.total_seconds(). abs function is also available.
# from copy import copy, deepcopy # use deepcopy to copy multi-dimentional matrix without reference
# from functools import reduce # reduce(f, iter[, init])
# from functools import lru_cache # @lrucache ...arguments of functions should be able to be keys of dict (e.g. list is not allowed)
# from heapq import heapify, heappush, heappop # built-in list. heapify(L) changes list in-place to min-heap in O(n), heappush(heapL, x) and heappop(heapL) in O(lgn).
# from heapq import nlargest, nsmallest # nlargest(n, iter[, key]) returns k-largest-list in O(n+klgn).
# from itertools import count, cycle, repeat # count(start[,step]), cycle(iter), repeat(elm[,n])
# from itertools import groupby # [(k, list(g)) for k, g in groupby('000112')] returns [('0',['0','0','0']), ('1',['1','1']), ('2',['2'])]
# from itertools import starmap # starmap(pow, [[2,5], [3,2]]) returns [32, 9]
# from itertools import product, permutations # product(iter, repeat=n), permutations(iter[,r])
# from itertools import combinations, combinations_with_replacement
from itertools import accumulate # accumulate(iter[, f])
# from operator import itemgetter # itemgetter(1), itemgetter('key')
# from fractions import gcd # for Python 3.4 (previous contest @AtCoder)
def main():
mod = 1000000007 # 10^9+7
inf = float('inf') # sys.float_info.max = 1.79...e+308
# inf = 2 ** 64 - 1 # (for fast JIT compile in PyPy) 1.84...e+19
sys.setrecursionlimit(10**6) # 1000 -> 1000000
def input(): return sys.stdin.readline().rstrip()
def ii(): return int(input())
def mi(): return map(int, input().split())
def mi_0(): return map(lambda x: int(x)-1, input().split())
def lmi(): return list(map(int, input().split()))
def lmi_0(): return list(map(lambda x: int(x)-1, input().split()))
def li(): return list(input())
n, w = mi()
L = [lmi() for _ in range(n)]
w1 = L[0][0]
if n * (w1 + 3) <= w:
print(sum(map(lambda x: x[1], L)))
elif n * w <= 10**6:
        # standard 0/1-knapsack DP over capacity
dp = [0 for _ in range(w+1)]
for i in range(n):
weight, value = L[i]
for j in range(w, 0, -1):
if j - weight >= 0:
dp[j] = max(dp[j], dp[j - weight] + value)
print(dp[w])
else:
group_by_weight = [[] for _ in range(4)]
for weight, value in L:
group_by_weight[weight - w1].append(value)
w1_0 = sorted(group_by_weight[0], reverse=True)
w1_1 = sorted(group_by_weight[1], reverse=True)
w1_2 = sorted(group_by_weight[2], reverse=True)
w1_3 = sorted(group_by_weight[3], reverse=True)
accum_0, accum_1, accum_2, accum_3 = map(lambda x: [0] + list(accumulate(x)), [w1_0, w1_1, w1_2, w1_3])
ans = -1
for i in range(len(w1_0)+1):
for j in range(len(w1_1)+1):
for k in range(len(w1_2)+1):
for l in range(len(w1_3)+1):
if (i+j+k+l)*w1 + j + 2*k + 3*l <= w:
ans = max(ans, accum_0[i] + accum_1[j] + accum_2[k] + accum_3[l])
print(ans)
if __name__ == "__main__":
main()
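# --- Standalone sketch (added; not part of the submission above) ---
# The third branch exploits the constraint that every weight lies in
# [w1, w1+3]: group items by weight, sort each group by value, and use prefix
# sums to score "take the i best of each group". A brute-force cross-check on
# a toy instance:
from itertools import accumulate as _acc, combinations as _comb

def _grouped_best(items, W):
    w1 = items[0][0]
    groups = [[] for _ in range(4)]
    for w, v in items:
        groups[w - w1].append(v)
    sums = [[0] + list(_acc(sorted(g, reverse=True))) for g in groups]
    best = 0
    for i in range(len(sums[0])):
        for j in range(len(sums[1])):
            for k in range(len(sums[2])):
                for l in range(len(sums[3])):
                    if (i + j + k + l) * w1 + j + 2 * k + 3 * l <= W:
                        best = max(best, sums[0][i] + sums[1][j] + sums[2][k] + sums[3][l])
    return best

def _brute(items, W):
    return max(sum(v for _, v in c)
               for r in range(len(items) + 1)
               for c in _comb(items, r)
               if sum(w for w, _ in c) <= W)

_toy = [(3, 30), (4, 50), (5, 60), (6, 10), (3, 25)]  # weights within [w1, w1+3] = [3, 6]
assert _grouped_best(_toy, 10) == _brute(_toy, 10) == 110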
|
[
"[email protected]"
] | |
d396a8f8b20811e04bbd7ca37975e7cbd6e02b53
|
e1ef3cf26898340de4b1a6f64d3ec399a169e873
|
/organizer/migrations/0003_startup_data.py
|
c6ca357021080b8d600e05246675cc5d4466fadb
|
[] |
no_license
|
JMorris1575/djututorial
|
1e6db5b2513a92adc5016c77998af759b99d80db
|
307b4859ca5cc8992ec113144e174c9238449cbf
|
refs/heads/master
| 2021-01-10T16:30:28.600985 | 2016-03-31T00:23:11 | 2016-03-31T00:23:11 | 50,468,857 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,309 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.db import migrations
STARTUPS = [
{
"name": "Arachnobots",
"slug": "arachnobots",
"contact": "[email protected]",
"description":
"Remote-controlled internet-enabled "
"Spider Robots.",
"founded_date": date(2014, 10, 31),
"tags": ["mobile", "augmented-reality"],
"website":
"http://frightenyourroommate.com/",
},
{
"name": "Boundless Software",
"slug": "boundless-software",
"contact": "[email protected]",
"description": "The sky was the limit.",
"founded_date": date(2013, 5, 15),
"tags": ["big-data"],
"website": "http://boundless.com/",
},
{
"name": "Game Congress",
"slug": "game-congress",
"contact": "[email protected]",
"description":
"By gamers, for gamers, of gamers.",
"founded_date": date(2012, 7, 4),
"tags": ["video-games"],
"website": "http://gamecongress.com/",
},
{
"name": "JamBon Software",
"slug": "jambon-software",
"contact": "[email protected]",
"description":
"JamBon Software is a consulting "
"company that specializes in web and "
"mobile products. They can carry out "
"full end-to-end development of new "
"products, or review and advise on "
"existing projects. They also offer "
"hands-on training in Django.",
"founded_date": date(2013, 1, 18),
"tags": ["django"],
"website": "http://jambonsw.com/",
},
{
"name": "Lightning Rod Consulting",
"slug": "lightning-rod-consulting",
"contact": "[email protected]",
"description":
"Channel the storm. "
"Trouble shoot the cloud.",
"founded_date": date(2014, 4, 1),
"tags":
["ipython", "jupyter", "big-data"],
"website": "http://lightningrod.com/",
},
{
"name": "Monkey Software",
"slug": "monkey-software",
"contact": "[email protected]",
"description":
"1000 code monkeys making software.",
"founded_date": date(2014, 12, 10),
"tags": ["video-games"],
"website": "http://monkeysw.com/",
},
{
"name": "Simple Robots",
"slug": "simple-robots",
"contact": "[email protected]",
"description":
"Your resource to understanding "
"computer, robots, and technology.",
"founded_date": date(2010, 1, 2),
"tags": ["python", "augmented-reality"],
"website": "http://simplerobots.com/",
},
{
"name": "Thingies",
"slug": "thingies",
"contact": "[email protected]",
"description":
"A marketplace for arduino, "
"raspberry pi, and other "
"homemade stuff.",
"founded_date": date(2015, 4, 7),
"tags": ["python"],
"website": "http://buythingies.com/",
},
]
def add_startup_data(apps, schema_editor):
Startup = apps.get_model(
'organizer', 'Startup')
Tag = apps.get_model('organizer', 'Tag')
for startup in STARTUPS:
startup_object = Startup.objects.create(
name=startup['name'],
slug=startup['slug'],
contact=startup['contact'],
description=startup['description'],
founded_date=startup['founded_date'],
website=startup['website'])
for tag_slug in startup['tags']:
startup_object.tags.add(
Tag.objects.get(
slug=tag_slug))
def remove_startup_data(apps, schema_editor):
Startup = apps.get_model(
'organizer', 'Startup')
for startup in STARTUPS:
startup_object = Startup.objects.get(
slug=startup['slug'])
startup_object.delete()
class Migration(migrations.Migration):
dependencies = [
('organizer', '0002_tag_data'),
]
operations = [
migrations.RunPython(
add_startup_data,
remove_startup_data)
]
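# --- Pattern note (added; the miniature below uses hypothetical row values) ---
# Pairing a forward function with a reverse function keeps the data migration
# reversible: `migrate` runs add_startup_data, while rolling back to
# 'organizer 0002' runs remove_startup_data. The same shape in miniature:
#
# def add_rows(apps, schema_editor):
#     Tag = apps.get_model('organizer', 'Tag')   # historical model, not the live class
#     Tag.objects.create(slug='example')
#
# def remove_rows(apps, schema_editor):
#     apps.get_model('organizer', 'Tag').objects.get(slug='example').delete()
#
# operations = [migrations.RunPython(add_rows, remove_rows)]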
|
[
"[email protected]"
] | |
992cea9f56e775cee4b0e905475671a7ec84941a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/180/usersdata/276/112623/submittedfiles/matriz1.py
|
827d37ca5f8c411adcdb865dc0e68dabe5c30d8d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
def primeiralinha (matriz):
    # index of the first row containing a 1
    for i in range (0,matriz.shape[0],1):
        for j in range (0,matriz.shape[1],1):
            if matriz[i,j]==1:
                return (i)
def ultimalinha (matriz):
    # index of the last row containing a 1 (keep overwriting l with the row index)
    for i in range (0,matriz.shape[0],1):
        for j in range (0,matriz.shape[1],1):
            if matriz[i,j]==1:
                l = i
    return (l)
def primeiracoluna (matriz):
    # index of the first column containing a 1
    for j in range (0,matriz.shape[1],1):
        for i in range (0,matriz.shape[0],1):
            if matriz[i,j]==1:
                return (j)
def ultimacoluna (matriz):
    # index of the last column containing a 1 (keep overwriting c with the column index)
    for j in range (0,matriz.shape[1],1):
        for i in range (0,matriz.shape[0],1):
            if matriz[i,j]==1:
                c = j
    return (c)
linhas = int (input('Enter the number of rows: '))
colunas = int (input('Enter the number of columns: '))
a = np.zeros ((linhas, colunas))
for i in range (0,linhas,1):
    for j in range (0,colunas,1):
        a[i,j] = int(input('Enter the matrix element: '))
menorlinha = primeiralinha (a)
maiorlinha = ultimalinha (a)
menorcoluna = primeiracoluna (a)
maiorcoluna = ultimacoluna (a)
b = a[menorlinha:maiorlinha+1,menorcoluna:maiorcoluna+1]
print (b)
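# --- Added cross-check (independent of the interactive input above) ---
# The four helpers find the bounding box of the 1-entries; np.nonzero gives
# the same bounds directly, which is a quick way to verify them:
_m = np.array([[0, 0, 0, 0],
               [0, 1, 0, 0],
               [0, 1, 1, 0],
               [0, 0, 0, 0]])
_r, _c = np.nonzero(_m)
_crop = _m[_r.min():_r.max() + 1, _c.min():_c.max() + 1]
assert (_crop == np.array([[1, 0], [1, 1]])).all()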
|
[
"[email protected]"
] | |
943d06d2f6e39a13dc30d6fb4ec46fdbb3e6bf10
|
baaaa9f2d3049a5bd8ec266af84b00b43eab8bbf
|
/core/migrations/0045_image_size.py
|
dedac82a43cd86f1fa2394f393e51d6aaf13745b
|
[] |
no_license
|
mary-lev/edu_test
|
13efd9e566ad99db4e2be03a391c48be609be336
|
51a18e6be3098b488e98db41f1226cb40a9b13d1
|
refs/heads/master
| 2023-01-29T08:01:57.701848 | 2020-12-13T16:31:58 | 2020-12-13T16:31:58 | 302,146,712 | 1 | 0 | null | 2020-12-06T22:11:44 | 2020-10-07T20:00:10 |
Python
|
UTF-8
|
Python
| false | false | 426 |
py
|
# Generated by Django 3.1.2 on 2020-12-11 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0044_auto_20201211_1338'),
]
operations = [
migrations.AddField(
model_name='image',
name='size',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
39e8ceb3de54d0bf19c11557589193abe58821d0
|
81dfc5e875ac18e531b0582399901eab4e555cdd
|
/kalliope/neurons/neurotransmitter/__init__.py
|
8fe2a755b8a0969cb852184a1b9b60e548a85f66
|
[
"MIT"
] |
permissive
|
azrael11/kalliope
|
3bdcf200d5a14f08045263cba0f564bec67afbe1
|
a68377d2c812fa9f9db5e9bb998b595e64363418
|
refs/heads/master
| 2021-01-12T01:37:39.898237 | 2016-12-28T15:25:47 | 2016-12-28T15:25:47 | 78,412,059 | 1 | 0 | null | 2017-01-09T09:06:33 | 2017-01-09T09:06:33 | null |
UTF-8
|
Python
| false | false | 46 |
py
|
from neurotransmitter import Neurotransmitter
|
[
"[email protected]"
] | |
a48349645033c2bb643c69ebbb80cd98c1b165f8
|
cbda89443b351bb2047180dad4e300c13dc3df7f
|
/Crystals/Morpurgo_all_sp_Reorgs_qsplit_TholeExp_emp_a/Jobs/C8_BTBT/C8_BTBT_anion_neut_inner1_outer0/C8_BTBT_anion_neut_inner1_outer0.py
|
4307ea1e54784e8a85b5358d45654ab5db863908
|
[] |
no_license
|
sheridanfew/pythonpolarisation
|
080f52979f98d26360a46412a10c8e3f51ee4549
|
178e2684e9a239a8e60af5f7b1eb414ac5f31e92
|
refs/heads/master
| 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null |
UTF-8
|
Python
| false | false | 7,466 |
py
|
import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}
name='C8_BTBT_anion_neut_inner1_outer0'
#For crystals here, all cubic and centred at centre
insize=1
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='anion'
mols_cen=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
mols_sur=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
mols_outer=['sp_C8_BTBT_mola_neut.xyz','sp_C8_BTBT_molb_neut.xyz']
Natoms=16
#From cif:
'''
C8_BTBT
_cell_length_a 5.927(7)
_cell_length_b 7.88(1)
_cell_length_c 29.18(4)
_cell_angle_alpha 90
_cell_angle_beta 92.443(4)
_cell_angle_gamma 90
_cell_volume 1361.61
'''
#Get translation vectors:
a=5.9277/0.5291772109217
b=7.881/0.5291772109217
c=29.184/0.5291772109217
alpha=90*(pi/180)
beta=92.4434*(pi/180)
gamma=90*(pi/180)
cif_unit_cell_volume=1361.61/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#specify for a diamond-shaped outer region; leave unspecified for a cube, which fills to the cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
print 'state',state
print 'q: ', qdict[state]
for atom in prot_neut_cry()._mols[0][prot_neut_cry()._cenpos[0]][prot_neut_cry()._cenpos[1]][prot_neut_cry()._cenpos[2]]():
atom()._crg=qdict[state]
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
screenradius=1.7278
# Thole paper screenradius value for fit to components of pol. tensor divided by no. atoms in mol. We choose this screenradius value for smearing of charge as, with near planar mols, in some dirs we have molecule-like polarisabilities with near atom-like separations.
#This form of screenradius will result in charge being smeared along the separation axis of molecules by NAtoms*(Thole's value for a single atom)
jm = JMatrix(jmtype='TholeExp',screenradius=screenradius)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cubic region, iterating over shells of molecules at increasing distance from the centre.
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(prot_neut_cry()._mols),1):
prot_neut_cry().calc_reorg_shareq(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,jm=jm._m,oldUqd=Uqd)
print 'Reorg: ', prot_neut_cry()._reorgs_shareq[molincell][a][b][c]
f = open('reorg_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs_shareq[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs_shareq()
print 'Job Completed Successfully.'
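# --- Standalone consistency check (added; independent of the run above) ---
# The fractional->cartesian matrix built near the top is upper triangular, so
# its determinant must equal a*b*c times the reduced cell-volume factor. A
# quick numerical check of that construction with the same cell parameters
# (in Angstrom here; the identity is unit-independent):
_a, _b, _c = 5.9277, 7.881, 29.184
_al, _be, _ga = np.radians([90.0, 92.4434, 90.0])
_vol = np.sqrt(1 - np.cos(_al)**2 - np.cos(_be)**2 - np.cos(_ga)**2
               + 2 * np.cos(_al) * np.cos(_be) * np.cos(_ga))
_M = np.array([[_a, _b * np.cos(_ga), _c * np.cos(_be)],
               [0., _b * np.sin(_ga), _c * (np.cos(_al) - np.cos(_be) * np.cos(_ga)) / np.sin(_ga)],
               [0., 0., _c * _vol / np.sin(_ga)]])
assert np.isclose(np.linalg.det(_M), _a * _b * _c * _vol)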
|
[
"[email protected]"
] | |
a47dfda2c685c8b5a97a0759d2410c2d53eca94d
|
c6e5bbafd810d23e0ee46d69026cba35339d1dbd
|
/search/constants.py
|
09cdc69a1b0e68f0ef3770ca45ce11fb198cf38b
|
[] |
no_license
|
mfonism/django-inqueerstigate
|
9c8b729848bf3df9fb9ec991ec47391b69ad7b66
|
af5420bf8adf6aa89533cd1462d9eeed6e8c88db
|
refs/heads/main
| 2023-05-26T12:59:55.774989 | 2021-06-07T11:46:48 | 2021-06-07T11:46:48 | 323,681,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
NO_FACE_FOUND_ERROR_CODE = "no face found"
MULTIPLE_FACES_FOUND_ERROR_CODE = "multiple faces found"
|
[
"[email protected]"
] | |
a7c965bda295df19c2aaa63b9a6c4712b018af08
|
01ec983c0a3cc35857d1900b8f26bb7e8ad93c2f
|
/Image_Processing_in_OpenCV/Geometric_Transformations_of_Images/Image_Thresholding/Otsu's_Binarization.py
|
2121e75d27dd1263e93dd4b74226da0d5eac8936
|
[] |
no_license
|
Joevaen/Opencv_On_CT
|
f88560c2eb3655e09e21ffe012d5c531c0742c94
|
3c896b4791d99c5be4fc21054aeeb25feb5d2e33
|
refs/heads/main
| 2023-03-18T21:10:28.772470 | 2021-03-04T07:34:02 | 2021-03-04T07:34:02 | 343,656,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,182 |
py
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('/home/qiao/PythonProjects/Opencv_On_CT/Test_Img/10.jpg',0)
# global thresholding
ret1,th1 = cv.threshold(img,200,255,cv.THRESH_BINARY)
# Otsu's thresholding
ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv.GaussianBlur(img,(5,5),0)
ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1,
img, 0, th2,
blur, 0, th3]
titles = ['Original Noisy Image','Histogram','Global Thresholding (v=200)',
'Original Noisy Image','Histogram',"Otsu's Thresholding",
'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
plt.show()
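# --- Added sketch: what cv.THRESH_OTSU computes, in plain NumPy ---
# Otsu's method picks the threshold maximising the between-class variance of
# the grayscale histogram; cv.threshold returns that value as ret2/ret3 above.
# A from-scratch version (should agree with OpenCV to within a gray level):
def otsu_threshold(gray):
    hist = np.bincount(gray.ravel(), minlength=256).astype(float)
    p = hist / hist.sum()                  # gray-level probabilities
    omega = np.cumsum(p)                   # class-0 weight at each cut k
    mu = np.cumsum(p * np.arange(256))     # cumulative first moment
    mu_t = mu[-1]                          # global mean
    with np.errstate(divide='ignore', invalid='ignore'):
        sigma_b = (mu_t * omega - mu) ** 2 / (omega * (1 - omega))
    return int(np.nanargmax(sigma_b))      # k with maximal between-class variance
# e.g. otsu_threshold(img) should be close to ret2 when the image loads.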
|
[
"[email protected]"
] | |
831fdc372ca74798f62c475972295be51a4f1e8b
|
78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c
|
/AlgorithmStudy/백준/19 해시맵/1620 나는야 포켓몬 마스터 이다솜(10.26).py
|
f5af375ee7308b90025d55f3cf56d9a6be83d7c7
|
[] |
no_license
|
cladren123/study
|
ef2c45bc489fa658dbc9360fb0b0de53250500e5
|
241326e618f1f3bb1568d588bf6f53b78920587a
|
refs/heads/master
| 2023-09-02T02:21:24.560967 | 2021-11-05T12:20:06 | 2021-11-05T12:20:06 | 368,753,950 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 598 |
py
|
import sys
input = sys.stdin.readline
# n : number of Pokemon, m : number of queries
n, m = map(int, input().split())
# use two dictionaries to avoid a slow linear search in either direction
dogam = dict()
dogam2 = dict()
# one dictionary maps number -> name, the other maps name -> number
for number in range(1,n+1) :
name = input().strip()
dogam[str(number)] = name
dogam2[name] = str(number)
for _ in range(m) :
one = input().strip()
    # check whether the query is a number
if one.isdigit() :
print(dogam[one])
else :
print(dogam2[one])
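# --- Added illustration: the bidirectional lookup on a toy input ---
# Two dictionaries make each query O(1); isdigit() picks the direction.
_dogam = {'1': 'Bulbasaur', '2': 'Ivysaur', '3': 'Venusaur'}
_dogam2 = {name: num for num, name in _dogam.items()}
assert _dogam['2'] == 'Ivysaur' and _dogam2['Venusaur'] == '3'
assert '25'.isdigit() and not 'Pikachu'.isdigit()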
|
[
"[email protected]"
] | |
2f754b68ad9d915c95dc469bc7caf5a5e105f0a6
|
1882ba2b04e2230692e7da0b963f20ccf859ce34
|
/Collect/VIIRS/LST_daily.py
|
b464ba525a4313c4144963048046d4c6ac92bb0a
|
[
"Apache-2.0"
] |
permissive
|
TimHessels/watertools
|
908230ae0f45de5379e6808fec827c55245c1cc2
|
2fc3680bfc6ad34bd2a11fba4cf302c5b84e5d78
|
refs/heads/master
| 2023-08-16T16:18:47.003632 | 2023-08-06T15:35:49 | 2023-08-06T15:35:49 | 158,684,796 | 4 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 743 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 9 13:16:00 2022
@author: timhe
"""
import sys
from watertools.Collect.VIIRS.DataAccess import DownloadData
def main(Dir, Startdate, Enddate, latlim, lonlim, Waitbar = 1):
"""
    This function downloads daily LST data from VIIRS for the selected time
    interval and spatial extent.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax]
lonlim -- [xmin, xmax]
Waitbar -- 1 (Default) will print a waitbar
"""
print('\nDownload daily VIIRS LST data for period %s till %s' %(Startdate, Enddate))
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar)
if __name__ == '__main__':
main(sys.argv)
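# --- Usage sketch (added; path, dates, and extents are hypothetical) ---
# main(r'C:/VIIRS_data/', Startdate='2023-01-01', Enddate='2023-01-07',
#      latlim=[29, 33], lonlim=[30, 35], Waitbar=1)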
|
[
"[email protected]"
] | |
595cccc02dc74de53021e84d1eae7a581cf94a0d
|
e17a64f1a063fca4fc7d833f85239a39a973c119
|
/tests/test_weight_allocations.py
|
503a1d58c1e6ce57a2d21d9b2e518549b625373a
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/moonshot
|
ff18694a3977fd01a40cabaae32907aae56bdb9f
|
d79cf26e7fb5ce3fcb34060771ea4992e19dc46a
|
refs/heads/master
| 2023-07-03T08:24:24.729332 | 2021-04-13T15:48:47 | 2021-04-13T15:48:47 | 271,562,585 | 0 | 0 |
Apache-2.0
| 2020-06-11T14:07:20 | 2020-06-11T14:07:19 | null |
UTF-8
|
Python
| false | false | 4,746 |
py
|
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.cache import TMP_DIR
class WeightAllocationsTestCase(unittest.TestCase):
def test_allocate_equal_weights(self):
"""
Tests that the allocate_equal_weights returns the expected
DataFrames.
"""
signals = pd.DataFrame(
data={
"FI12345": [1, 1, 1, 0, 0],
"FI23456": [0, -1, 1, 0, -1],
}
)
target_weights = Moonshot().allocate_equal_weights(signals, cap=1.0)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [1.0, 0.5, 0.5, 0.0, 0.0],
"FI23456": [0.0, -0.5, 0.5, 0.0, -1.0]}
)
target_weights = Moonshot().allocate_equal_weights(signals, cap=0.5)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.5, 0.25, 0.25, 0.0, 0.0],
"FI23456": [0.0, -0.25, 0.25, 0.0, -0.5]}
)
def test_allocate_fixed_weights(self):
"""
Tests that the allocate_fixed_weights returns the expected
DataFrames.
"""
signals = pd.DataFrame(
data={
"FI12345": [1, 1, 1, 0, 0],
"FI23456": [0, -1, 1, 0, -1],
"FI34567": [1, 1, 1, -1, -1]
}
)
target_weights = Moonshot().allocate_fixed_weights(signals, 0.34)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.34, 0.34, 0.34, 0.0, 0.0],
"FI23456": [0.0, -0.34, 0.34, 0.0, -0.34],
"FI34567": [0.34, 0.34, 0.34, -0.34, -0.34]}
)
def test_allocate_fixed_weights_capped(self):
"""
Tests that the allocate_fixed_weights_capped returns the expected
DataFrames.
"""
signals = pd.DataFrame(
data={
"FI12345": [1, 1, 1, 0, 0],
"FI23456": [0, -1, 1, 0, -1],
"FI34567": [1, 1, 1, -1, -1]
}
)
target_weights = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=1.5)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.34, 0.34, 0.34, 0.0, 0.0],
"FI23456": [0.0, -0.34, 0.34, 0.0, -0.34],
"FI34567": [0.34, 0.34, 0.34, -0.34, -0.34]}
)
target_weights = Moonshot().allocate_fixed_weights_capped(signals, 0.34, cap=0.81)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.34, 0.27, 0.27, 0.0, 0.0],
"FI23456": [0.0, -0.27, 0.27, 0.0, -0.34],
"FI34567": [0.34, 0.27, 0.27, -0.34, -0.34]}
)
def test_allocate_market_neutral_fixed_weights_capped(self):
"""
Tests that the allocate_market_neutral_fixed_weights_capped returns
the expected DataFrames.
"""
signals = pd.DataFrame(
data={
"FI12345": [1, 1, 1, 0, 0],
"FI23456": [0, -1, 1, 1, -1],
"FI34567": [1, 1, -1, -1, -1]
}
)
target_weights = Moonshot().allocate_market_neutral_fixed_weights_capped(
signals, 0.34, cap=1.2, neutralize_weights=False)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.3, 0.3, 0.3, 0.0, 0.0],
"FI23456": [0.0, -0.34, 0.3, 0.34, -0.3],
"FI34567": [0.3, 0.3, -0.34, -0.34, -0.3]}
)
target_weights = Moonshot().allocate_market_neutral_fixed_weights_capped(
signals, 0.34, cap=1.2, neutralize_weights=True)
self.assertDictEqual(
target_weights.to_dict(orient="list"),
{"FI12345": [0.0, 0.17, 0.17, 0.0, 0.0],
"FI23456": [0.0, -0.34, 0.17, 0.34, -0.0],
"FI34567": [0.0, 0.17, -0.34, -0.34, -0.0]}
)
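# --- Added cross-check: the arithmetic behind the first test, in plain pandas ---
# Each period's nonzero signals split the cap equally while keeping their
# sign. Recomputed here without Moonshot (a sketch, not the library's code):
def _equal_weights(signals, cap=1.0):
    n = signals.abs().sum(axis=1)          # number of active positions per period
    return signals.div(n.replace(0, 1), axis=0) * cap

_sig = pd.DataFrame({"FI12345": [1, 1, 1, 0, 0], "FI23456": [0, -1, 1, 0, -1]})
assert _equal_weights(_sig).to_dict(orient="list") == {
    "FI12345": [1.0, 0.5, 0.5, 0.0, 0.0],
    "FI23456": [0.0, -0.5, 0.5, 0.0, -1.0]}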
|
[
"[email protected]"
] | |
80d15003ccee80d204c7d0649c3f5102c1bf4b8d
|
527d3d38b57b9f12ea6e91167a9a06149a89044b
|
/wxFEFactory/python/tools/dolphin/rsfj99/datasets.py
|
3398e379ff9c3986d2ef344ba217ef01a1894a2e
|
[] |
no_license
|
czastack/wxFEFactory
|
0d7ee77e542234c039b6056aeb44cb2a40341714
|
bed824746266734e40103010c0132aad069d723a
|
refs/heads/master
| 2021-06-04T11:31:05.808038 | 2019-06-03T18:51:01 | 2019-06-03T18:51:01 | 97,013,565 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,585 |
py
|
COMMON_HEAD = ("名称", "说明"), (200, 960)
LISTS = (
{
"name": "potion",
"caption": "回复",
"head": COMMON_HEAD,
"hascount": True,
"items": (
("竹制水筒", "将竹子切开做成的水筒生命力80回复 生气250取得 五成饱"),
("甜酒", "孩子也能喝的白色甜米酒生命力100回复 生气300取得 五成饱"),
("般若汤", "佛教中人喜欢的不可思议之水生命力200回复 生气450取得 五成饱"),
("神酒", "供奉神灵的酒,据说含有灵力生命力300回复 生气600取得 八成饱"),
("浊酒", "以米为原料酿制的浊酒生命力400回复 生气800取得 八成饱"),
("柿", "秋天生长的橙色果实生命力600回复 生气1000取得 八成饱"),
("蜜柑", "冬天生长的酸甜果实生命力1000回复 生气1500取得 八成饱"),
("桃", "夏天生长的水灵甜美的果实生命力2000回复 生气2000取得 八成饱"),
("回复丸", "常被忍者携带用来回复体力的药丸生命力50回复 生气10取得"),
("治愈丸", "更加强力的药丸生命力70回复 生气10取得"),
("熊胆丸", "使用熊胆作为原料的名贵药丸生命力100回复 生气10取得"),
("虎胆丸", "据说是以虎胆为原料制成的药丸生命力150回复 生气10取得"),
("龙脑丸", "使用龙脑制成的贵重药丸生命力200回复 生气10取得"),
("神仙丸", "拥有强大神通力的仙人煎制的药丸生命力300回复 生气10取得"),
("延龄丹", "吃下去能变得如年轻时一样有力的神奇之药生命力500回复 生气10取得"),
("灵方反魂丹", "能在瞬间治愈身体所有伤痛的不可思议之药生命力1000回复 生气10取得"),
("麻布之符", "能保护身体不受火炎灼伤的纸符回复灼伤状态"),
("御百草", "使用多种药草煎制,具有解毒效果的药回复毒状态"),
("虾蟇之油", "从虾蟇蛙的油中提取的贵重的软膏回复所有异常状态"),
)
},
{
"name": "consumer",
"caption": "消费",
"head": COMMON_HEAD,
"hascount": True,
"items": (
("青铜之镜", "用青铜制作,蕴藏灵力的镜子回到最后访问的祠堂"),
("八咫镜", "被称为神器的神奇镜子,可以回到最后访问的祠堂(可以无限次使用)"),
("烟玉", "投出去后能在周围产生烟幕的小球可以从战斗中脱离(头目战除外)"),
("大村砥石", "磨刀用的砥石回复五成刀的灵力"),
("伊予砥石", "磨刀用的砥石完全回复刀的灵力"),
("五十岚砥石", "磨刀用的砥石回复五成所有刀的灵力"),
("常见寺砥石", "磨刀用的砥石完全回复所有刀的灵力"),
("内昙砥石", "磨刀用的砥石一定时间内刀的灵力不减"),
)
},
{
"name": "food_material",
"caption": "食材",
"head": COMMON_HEAD,
"hascount": True,
"items": (
("米袋", "用来制作料理的。商店购买。"),
("白菜", "用来制作料理的食材。商店购买。"),
("萝卜", "用来制作料理的食材。商店购买。"),
("豆腐", "用来制作料理的食材。商店购买。"),
("油炸豆腐", "用来制作料理的食材。商店购买。"),
("薯", "用来制作料理的食材。商店购买。"),
("鸟肉", "用来制作料理的食材。干掉遇到的野鸡获得。"),
("野猪肉", "用来制作料理的食材。干掉遇到的野猪获得。"),
("岩鱼", "用来制作料理的食材。商店购买。"),
("鱿鱼", "用来制作料理的食材。商店购买。"),
)
},
{
"name": "decorator",
"caption": "装饰1",
"head": COMMON_HEAD,
"hascount": False,
"items": (
("忍者笼手", "力+1 身体能力上升"),
("妖狐笼手", "力+2 身体能力上升"),
("天狗笼手", "力+3 身体能力上升"),
("空蝉笼手", "力+4 身体能力上升"),
("修罗笼手", "力+5 身体能力上升"),
("八咫笼手", "力+6 身体能力上升"),
("鬼神笼手", "力+7 身体能力上升"),
("龙神笼手", "力+10 身体能力上升"),
("忍者腰带", "体力+1 身体能力上升"),
("妖狐腰带", "体力+2 身体能力上升"),
("天狗腰带", "体力+3 身体能力上升"),
("空蝉腰带", "体力+4 身体能力上升"),
("修罗腰带", "体力+5 身体能力上升"),
("八咫腰带", "体力+6 身体能力上升"),
("鬼神腰带", "体力+7 身体能力上升"),
("龙神腰带", "体力+10 身体能力上升"),
("天狗面具", "力・体力+1 身体能力上升"),
("修罗面具", "力・体力+3 身体能力上升"),
("鬼神面具", "力・体力+5 身体能力上升"),
("龙神面具", "力・体力+7 身体能力上升"),
("火鼠之衣", "炎无效化,可以防御着火的异常状态"),
("水虎之鳞", "毒无效化,可以防御中毒的异常状态"),
("鬼之首饰", "异常几率减半,一半几率防御所有异常状态"),
("龙之首饰", "异常无效化,有完全防御所有异常状态的效果"),
("达人之书", "增加经验值,取得的经验值增加一半"),
)
},
{
"name": "decorator2",
"caption": "装饰2",
"head": COMMON_HEAD,
"hascount": False,
"items": (
("锁帷子", "铁壁之构:将受到的伤害减轻十分之一"),
("乌头的毒药", "必杀之刃:低几率一击击倒敌人"),
("石见的毒药", "必杀之刃:贰 高几率一击击倒敌人"),
("桧之念珠", "生命吸收:吸收百分之一给予敌人的伤害"),
("七宝念珠", "生命吸收:贰 吸收百分之二给予敌人的伤害"),
("丁子油", "攻击强化:给予敌人的伤害上升百分之五"),
("剑豪的鞘", "居合强化:居合斩的攻击力提升十分之一"),
("剑圣的鞘", "瞬时居合:能使用居合斩的间隔非常短"),
("伊势的护身符", "灵力还元:吸收魂的时候灵力回复量上升"),
("出云的护身符", "灵力还元 贰:吸收魂的时候灵力回复量大幅上升"),
("大和的灵药", "自动回复:每隔一定时间生命力徐徐回复百分之一"),
("富山的灵药", "自动回复 贰:每隔一定时间生命力徐徐回复百分之二"),
("饿鬼的小碟", "瞬时消化:满腹槽的消化速度加快"),
("贪吃汉的小碟", "瞬时消化 贰:满腹槽的消化速度非常快"),
("打出小槌", "金取得量上升:钱的取得量上升十分之"),
("幸运稻草", "道具取得几率上升:道具的获得几率大幅上升"),
("水晶勾玉", "吸魂强化:魂的取得量上升百分之五"),
("翡翠勾玉", "吸魂强化 贰:魂的取得量上升十分之一"),
("恶鬼之印", "荒修行:最大生命力减少,获得经验值上升十分之一"),
("修罗之印", "荒修行 贰:最大生命力减少,获得经验值上升十分之二"),
("忍足袋", "忍足:遇敌率稍微降低"),
("隐遁足袋", "忍足 贰:遇敌率大幅降低"),
("鸣子", "诱敌:遇敌率稍微上升"),
("二连鸣子", "诱敌 贰:遇敌率大幅上升"),
("刀匠的小槌", "刀收集强化:根据收集刀的数量提升攻击力"),
("富商的算盘", "金收集强化:根据收集金钱的数量提升攻击力"),
("伊万里的大皿", "生气收集强化:根据收集生气的数量提升攻击力"),
("灵木牌位", "魂收集强化:根据收集魂的数量提升攻击力"),
("鸣神腕轮", "无灵力消费:不消费灵力使用奥义"),
("仁王腕轮", "刀攻击力固定:所有刀的攻击力固定为七百"),
)
},
{
"name": "book",
"caption": "书物",
"head": COMMON_HEAD,
"hascount": False,
"items": (
("厨房秘传之书1", "记载着厨房料理秘传菜谱的书(全5卷) 记载着料理「饭团」的制作方法"),
("烧烤料理帖1", "记载着利用篝火制作料理的书(全3卷) 记载着料理「烤红薯」的制作方法"),
("烧烤料理帖2", "记载着利用篝火制作料理的书(全3卷) 记载着料理「烤鱼」的制作方法"),
("烧烤料理帖3", "记载着利用篝火制作料理的书(全3卷) 记载着料理「烧烤鱿鱼」的制作方法"),
("烹调推荐1", "记载着烹调料理相关的书(全5卷) 记载着料理「白菜锅」的制作方法"),
("厨房秘传之书2", "记载着厨房料理秘传菜谱的书(全5卷) 记载着料理「炖煮萝卜」的制作方法"),
("厨房秘传之书3", "记载着厨房料理秘传菜谱的书(全5卷) 记载着料理「汤豆腐」的制作方法"),
("厨房秘传之书4", "记载着厨房料理秘传菜谱的书(全5卷) 记载着料理「味噌汤」的制作方法"),
("厨房秘传之书5", "记载着厨房料理秘传菜谱的书(全5卷) 记载着料理「炖红薯」的制作方法"),
("锅料理全集1", "记载着基础的锅料理的书(全3卷) 记载着料理「鸟肉汆锅」的制作方法"),
("烹调推荐2", "记载着烹调料理相关的书(全5卷) 记载着料理「雪见锅」的制作方法"),
("烹调推荐3", "记载着烹调料理相关的书(全5卷) 记载着料理「杂炊」的制作方法"),
("烹调推荐4", "记载着烹调料理相关的书(全5卷) 记载着料理「红薯杂炊」的制作方法"),
("烹调推荐5", "记载着烹调料理相关的书(全5卷) 记载着料理「鸟肉杂炊」的制作方法"),
("锅料理全集2", "记载着基础的锅料理的书(全3卷) 记载着料理「鸭锅」的制作方法"),
("锅料理全集3", "记载着基础的锅料理的书(全3卷) 记载着料理「什锦锅」的制作方法"),
("大锅料理帖1", "记载着高价的锅料理的书(全4卷) 记载着料理「馎饦」的制作方法"),
("大锅料理帖2", "记载着高价的锅料理的书(全4卷) 记载着料理「呷噗呷噗火锅」的制作方法"),
("大锅料理帖3", "记载着高价的锅料理的书(全4卷) 记载着料理「牡丹锅」的制作方法"),
("大锅料理帖4", "记载着高价的锅料理的书(全4卷) 记载着料理「相扑火锅」的制作方法"),
("村正面具", "寄宿村正之灵的面具"),
("山城国地图", "画着山城国周边的地图 持有它可以得到街道的情报"),
("大和国地图", "画着大和国周边的地图 持有它可以得到街道的情报"),
("伊贺国地图", "画着伊贺国周边的地图 持有它可以得到街道的情报"),
("伊势国地图", "画着伊势国周边的地图 持有它可以得到街道的情报"),
("近江国地图", "画着近江国周边的地图 持有它可以得到街道的情报"),
("美浓国地图", "画着美浓国周边的地图 持有它可以得到街道的情报"),
("尾张国地图", "画着尾张国周边的地图 持有它可以得到街道的情报"),
("三河国地图", "画着三河国周边的地图 持有它可以得到街道的情报"),
("飞騨国地图", "画着飞騨国周边的地图 持有它可以得到街道的情报"),
("信浓国地图", "画着信浓国周边的地图 持有它可以得到街道的情报"),
("远江国地图", "画着远江国周边的地图 持有它可以得到街道的情报"),
("甲斐国地图", "画着甲斐国周边的地图 持有它可以得到街道的情报"),
("骏河国地图", "画着骏河国周边的地图 持有它可以得到街道的情报"),
("伊豆国地图", "画着伊豆国周边的地图 持有它可以得到街道的情报"),
("相模国地图", "画着相模国周边的地图 持有它可以得到街道的情报"),
("武藏国地图", "画着武藏国周边的地图 持有它可以得到街道的情报"),
)
},
{
"name": "food",
"caption": "料理",
"head": COMMON_HEAD,
"hascount": True,
"items": (
("饭团", "(烧烤料理)将刚煮熟的饭用手捏成的饭团【生命力300回复 生气600取得 吃饱】"),
("烤红薯", "(烧烤料理)用枯叶烤出的红薯,味道甘甜【生命力600回复 生气900取得 吃饱】"),
("烤鱼", "(烧烤料理)将岩鱼用扦子串起,用篝火烧烤的烤鱼【生命力1200回复 生气1600取得 吃饱】"),
("烧烤鱿鱼", "(烧烤料理)将鱿鱼用扦子串起,用篝火烧烤的烤鱿鱼【生命力2500回复 生气2000取得 吃饱】"),
("白菜锅", "(锅料理)用白菜煮成的火锅 一段时间内攻击力上升【生命力200回复 生气1200取得 非常饱】"),
("炖煮萝卜", "(锅料理)煮得很好的炖萝卜锅 一段时间内防御力上升【生命力300回复 生气1500取得 非常饱】"),
("汤豆腐", "(锅料理)煮了豆腐的火锅 一段时间内攻击力・防御力上升【生命力400回复 生气1800取得 非常饱】"),
("味噌汤", "(锅料理)融入了味噌的酱汤 一段时间内灵力回复率上升【生命力500回复 生气2100取得 非常饱】"),
("炖红薯", "(锅料理)煮得很好的炖红薯锅 一段时间内可以吸收敌人的生命力【生命力600回复 生气2400取得 非常饱】"),
("鸟肉汆锅", "(锅料理)以鸟肉为主的汆锅 一段时间内遇敌率下降【生命力700回复 生气2800取得 非常饱】"),
("雪见锅", "(锅料理)以萝卜为主的火锅 一段时间内遇敌率上升【生命力800回复 生气3200取得 非常饱】"),
("杂炊", "(锅料理)以米为主的火锅 一段时间内得到金钱量上升【生命力1000回复 生气3600取得 非常饱】"),
("红薯杂炊", "(锅料理)以红薯为主的杂炊 一段时间内道具的入手率上升【生命力1500回复 生气4000取得 非常饱】"),
("鸟肉杂炊", "(锅料理)以鸟肉为主的杂炊 一段时间内状态异常无效化【生命力1800回复 生气5000取得 非常饱】"),
("鸭锅", "(锅料理)以鸭肉为主的汆锅 一段时间内灵力不会减少【生命力2000回复 生气6000取得 非常饱】"),
("什锦锅", "(锅料理)用各种食材制作的火锅 一段时间内奥义的效果上升【生命力3000回复 生气7000取得 非常饱】"),
("呷噗呷噗火锅", "(锅料理)以肉类为主的火锅 一段时间内角色无敌【生命力5000回复 生气9000取得 非常饱】"),
("牡丹锅", "(锅料理)以野猪肉为主的火锅 一段时间内自动大回复【生命力6000回复 生气10000取得 非常饱】"),
("相扑火锅", "(锅料理)放入了所有食材的火锅 一段时间内得到经验值增加加【生命力9999回复 生气15000取得 非常饱】"),
("年糕小豆汤", "(店料理)小豆煮熟后放上年糕用糖调制而成的甜品【生命力完全回复 生气1800取得 吃饱】"),
("什锦甜凉粉", "(店料理)寒天和年糕、水果搭配并淋上蜜汁而成的甜品【生命力完全回复 生气1800取得 吃饱】"),
("水羊羹", "(店料理)水嫩的带有豆馅的甜品 生命力完全回复【生气1800取得 吃饱】"),
("金锷", "(店料理)形状如刀锷一般的甜味烤点心 生命力完全回复【生气1800取得 吃饱】"),
("三色团子", "(店料理)用竹签串着红・白・绿三色丸子的甜品【生命力三割回复 生气600取得 五成饱】"),
("水包子", "(店料理)透明的外皮里包进去豆馅的包子【生命力三割回复 生气600取得 五成饱】"),
("豆大福饼", "(店料理)给加了豆的豆糕加入豆馅制成的年糕【生命力六割回复 生气1200取得 八成饱】"),
("草饼", "(店料理)散发艾草香味的年糕【生命力六割回复 生气1200取得 八成饱】"),
("蕨饼", "(店料理)在透明的年糕上涂满了黄豆粉制成【生命力完全回复 生气1800取得 吃饱】"),
("白米饭", "(店料理)在刚煮好米饭里放了咸菜的简朴的食物【生命力三成回复 生气480取得 五成饱】"),
("过水荞麦面", "(店料理)用冷水让面条变得劲道的二八荞麦面【生命力六成回复 生气960取得 八成饱】"),
("鲱鱼荞麦面", "(店料理)加入了鲱鱼的热气腾腾的荞麦面【生命力八成回复 生气1440取得 吃饱】"),
("天妇罗荞麦面", "(店料理)添加了巨大的海老天的豪华荞麦面【生命力完全回复 生气1920取得 非常饱】"),
("豆皮乌龙面", "(店料理)放入了油炸豆皮的切面【生命力六成回复 生气960取得 八成饱】"),
("月见乌龙面", "(店料理)添上了半熟的芙蓉蛋的切面【生命力八成回复 生气1440取得 吃饱】"),
("天妇罗乌龙面", "(店料理)添加了巨大的海老天的豪华切面【生命力完全回复 生气1920取得 非常饱】"),
("鳗鱼饭", "(店料理)在热乎乎的米饭上涂满切碎的鳗鱼宽叶香蒲烧的盖饭【生命力八成回复 生气4800取得 吃饱】"),
("青花鱼寿司", "(店料理)使用了青花鱼制作的青鱼手握寿司【生命力六成回复 生气2400取得 八成饱】"),
("幼鱼寿司", "(店料理)作为吉祥物而有名的幼鱼手握寿司【生命力八成回复 生气3600取得 八成饱】"),
("金枪鱼寿司", "(店料理)放了富含脂肪的大肥鲔鱼的手握寿司【生命力六成回复 生气1200取得 八成饱】"),
("鲆寿司", "(店料理)使用了鲆鱼白色部分的手握寿司【生命力六成回复 生气2400取得 八成饱】"),
("海胆寿司", "(店料理)添加了很多海胆的军舰卷【生命力八成回复 生气3600取得 八成饱】"),
("带头鲷鱼", "(店料理)从头到尾整个烤制的江户式鲷鱼的一种【生命力完全回复 生气6000取得 非常饱】"),
("海老天妇罗", "(店料理)用伊势大虾做的豪华天妇罗【生命力完全回复 生气6000取得 非常饱】"),
("鱚鱼天妇罗", "(店料理)将鱚鱼表面油炸制成的上品天妇罗【生命力八成回复 生气4800取得 吃饱】"),
)
},
{
"name": "blade",
"caption": "刀",
"head": (("名称", "攻击", "奥义", "效果", "带刀条件(力, 体力)"), (300, 100, 200, 300, 300)),
"hascount": False,
"items": (
("「神乐」村正", 15, "旋风", "无", "3, 3"),
("无铭刀「新明」", 11, "飞天辉夜", "无", "3, 3"),
("无铭刀「八云」", 17, "焰二连", "无", "3, 3"),
("「疾风」村正", 19, "妖炎", "铁壁之构", "4, 4"),
("「阳炎」村正", 14, "幻影阳炎", "瞬时消化", "4, 4"),
("长曾祢虎徹", 16, "地狱独乐", "毒减半", "4, 4"),
("「夏雨」村正", 25, "八丁斩", "无", "6, 5"),
("「夕凪」村正", 28, "飞天幻影", "金取得量上升", "6, 6"),
("枝打村正", 23, "丸太割", "攻击强化", "7, 7"),
("「宵暗」村正", 36, "鬼火岚", "力+1", "8, 8"),
("伊贺守金道", 30, "影十文字", "体力+1", "9, 9"),
("「文月」村正", 45, "飞天辉夜 贰", "无", "10, 10"),
("「叶月」村正", 41, "地驰", "无", "13, 12"),
("「长月」村正", 59, "斩首蜻蛉", "力・体力+1", "14, 14"),
("孙六兼元", 54, "镰鼬", "灵力还元", "17, 17"),
("「雾雨」村正", 65, "丁半斩", "无", "21, 20"),
("朝岚村正", 71, "烈风", "居合强化", "22, 22"),
("「时雨」村正", 99, "幻影阳炎 贰", "无", "24, 24"),
("「天雾」村正", 106, "幽鬼横飞", "必杀之刃", "26, 26"),
("小乌丸", 90, "逆柱", "自动回复", "29, 28"),
("「阳月」村正", 104, "弧月", "无", "33, 32"),
("「霜月」村正", 139, "地狱独乐 贰", "无", "36, 35"),
("「极月」村正", 148, "月下一闪", "体力+2", "38, 37"),
("蜘蛛切", 167, "飞天流星", "炎减半", "43, 42"),
("火走村正", 152, "焰三连", "灵力还元", "49, 47"),
("「石突」村正", 198, "烈风 贰", "无", "52, 50"),
("「风波」村正", 209, "疾风斩", "经验值增加", "55, 53"),
("「水岚」村正", 180, "八丁斩 贰", "无", "58, 56"),
("备前长船兼光", 190, "烈风走破", "灵力还元", "61, 59"),
("利刀「冰雨」村正", 209, "斩首蜻蛉 贰", "无", "67, 65"),
("利刀「水断」村正", 264, "地驰 贰", "生命吸收", "70, 68"),
("棚桥村正", 275, "影十文字 贰", "无", "74, 72"),
("相州五郎正宗", 250, "四镰鼬", "无", "80, 78"),
("利刀「黄昏」村正", 271, "怨灵横飞", "金取得量上升", "87, 85"),
("利刀「黎明」村正", 333, "飞天幻影 贰", "攻击强化", "91, 88"),
("利刀「晓光」村正", 345, "疾风斩 贰", "无", "94, 91"),
("燕反村正", 303, "飞天流星 贰", "瞬时消化", "98, 95"),
("无铭玉之绪", 369, "烈风走破 贰", "力・体力+5", "101, 98"),
("岩通村正", 314, "焰四连", "金取得量上升", "101, 98"),
("利刀「绯天」村正", 325, "幻影阳炎 叁", "铁壁之构", "105, 102"),
("利刀「明镜」村正", 392, "三妖炎", "无", "108, 105"),
("笹之雪村正", 346, "飞天幻影 叁", "力+3", "111, 108"),
("利刀「秋水」村正", 416, "飞天流星 叁", "无", "115, 112"),
("拔柄杓村正", 427, "鬼火岚 贰", "自动回复", "118, 115"),
("「千鹤」村正", 12, "霞斩", "无", "3, 3"),
("无铭刀「飞水」", 13, "飞天月光", "无", "3, 3"),
("无铭刀「小波」", 14, "三日月", "无", "3, 3"),
("「牡丹」村正", 18, "飞燕丸锯", "攻击强化", "4, 4"),
("「菖蒲」村正", 15, "雷光二连", "炎减半", "4, 4"),
("长谷部国重", 17, "星天风车", "力+1", "4, 4"),
("「海燕」村正", 19, "幻影雷光", "金取得量上升", "5, 6"),
("「月影」村正", 23, "妖雷", "瞬时消化", "6, 6"),
("「云雀」村正", 30, "骷髅", "体力+1", "7, 7"),
("立袈裟村正", 33, "夜叉之舞", "无", "8, 8"),
("火车切广光", 33, "地蜂", "炎无效化", "9, 9"),
("「睦月」村正", 41, "朔夜", "无", "10, 10"),
("「如月」村正", 49, "飞天月光 贰", "无", "12, 13"),
("「花月」村正", 53, "雷电", "灵力还元", "15, 16"),
("出云守永则", 65, "月之轮", "吸魂强化", "16, 17"),
("「玉椿」村正", 71, "怨返", "必杀之刃", "18, 19"),
("「山吹」村正", 70, "分身霞斩", "无", "20, 21"),
("朽绳村正", 78, "幻影雷光 贰", "毒无效化", "21, 23"),
("「朝樱」村正", 97, "龙卷", "无", "25, 27"),
("一文字则宗", 99, "星天彗星", "生命吸收", "27, 29"),
("「卯月」村正", 119, "飞燕丸锯 贰", "无", "32, 34"),
("「皋月」村正", 135, "飞天月光 叁", "无", "36, 39"),
("「水月」村正", 139, "骷髅 贰", "力+2", "39, 41"),
("狮子王", 148, "天地一闪", "力・体力+3", "41, 44"),
("「清花」村正", 167, "影蜂", "异常减半", "47, 50"),
("「舞鸟」村正", 180, "霞隐", "经验值增加", "50, 53"),
("「乱风」村正", 190, "日轮", "无", "52, 56"),
("月见斩村正", 198, "三身霞斩", "无", "55, 59"),
("鬼切", 209, "多重月之轮", "攻击强化", "58, 62"),
("利刀「紫电」村正", 229, "雷电 贰", "无", "64, 68"),
("利刀「飞英」村正", 250, "星天风车 贰", "灵力还元", "71, 75"),
("血滑村正", 264, "雷光三连", "无", "74, 78"),
("陆奥守吉行", 271, "大龙卷", "居合强化", "77, 82"),
("利刀「水帘」村正", 293, "地虫", "无", "84, 88"),
("利刀「雾冰」村正", 310, "诅咒三日月", "体力+3", "87, 92"),
("利刀「白露」村正", 322, "天地一闪 贰", "无", "90, 95"),
("露払村正", 325, "三怨返", "必杀之刃", "93, 99"),
("凭落", 345, "地蜂 贰", "自动回复", "97, 102"),
("雪月花村正", 357, "雷光四连", "吸魂强化", "100, 106"),
("利刀「华彩」村正", 360, "幻影雷光 叁", "居合强化", "103, 110"),
("滝之水村正", 369, "星天风车 叁", "无", "103, 112"),
("利刀「樱花」村正", 380, "三妖雷", "攻击强化", "107, 113"),
("利刀「血樱」村正", 379, "夜叉之舞 贰", "生命吸收", "110, 117"),
("通拔村正", 389, "大日轮", "无", "113, 120"),
("宝刀「月读」村正", 430, "四怨返", "道具取得几率上升", "118, 117"),
("三日月宗近", 440, "月天十六夜", "生命吸收", "122, 118"),
("宝刀「天津」村正", 414, "星天彗星 贰", "攻击强化", "119, 120"),
("鬼丸国纲", 416, "水面月之轮", "异常减半", "117, 124"),
("宝刀「青龙」村正", 463, "鬼火岚 叁", "无", "129, 131"),
("宝刀「白虎」村正", 451, "诅咒弧月", "经验值增加", "132, 134"),
("数珠丸恒次", 507, "四妖雷", "必杀之刃", "142, 144"),
("宝刀「朱雀」村正", 496, "四妖炎", "体力+4", "145, 147"),
("宝刀「神风」村正", 529, "骷髅 叁", "瞬时消化 贰", "155, 157"),
("宝刀「玄武」村正", 570, "夜叉之舞 叁", "力・体力+1", "161, 163"),
("宝刀「黄龙」村正", 590, "落岩", "力+4", "167, 169"),
("宝刀「梦幻」村正", 580, "天地一闪 叁", "居合强化", "170, 172"),
("笼钓瓶村正", 628, "影十文字 叁", "必杀之刃 贰", "179, 181"),
("大典太光世", 645, "五镰鼬", "灵力还原", "184, 187"),
("大包平", 636, "疾风斩 叁", "异常无效化", "187, 189"),
("名刀「乡愁」村正", 663, "飞燕雀蜂", "生气收集强化", "194, 197"),
("名刀「彗星」村正", 695, "星天彗星 叁", "瞬时居合", "199, 202"),
("童子切安纲", 687, "迅雷", "生命吸收 贰", "201, 204"),
("名刀「时渡」村正", 724, "真月天十六夜", "攻击强化", "208, 211"),
("胧村正", 757, "百鬼乱闪", "力・体力+7", "222, 225"),
)
}
)
|
[
"[email protected]"
] | |
17c5d396a00398931532c3a8964393328e332685
|
ff88a620c7437af9af946643cd65f06c99fe3601
|
/IntermediateCodeAndTesting/JunePsychoPyTesting/dondersExperimentsWithData/DondersAuditory_lastrun.py
|
e061da7c07226c9b77aea637be2a0ad5f8141241
|
[] |
no_license
|
bissettp/TrainedInhibitionTask
|
c2f20dadbb0e440c4fcf2bd3c4d670a7416df93c
|
82727bd3ffa101209a61f2ff4f057f8896522d5d
|
refs/heads/master
| 2020-06-06T20:02:16.470092 | 2015-06-23T17:56:15 | 2015-06-23T17:56:15 | 34,129,470 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,472 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.82.01), Tue Jun 2 14:44:04 2015
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'DondersAuditory' # from the Builder filename that created this script
expInfo = {'participant':'', 'session':'001'}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/patrickbissett/Desktop/DondersAuditory.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(2560, 1440), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
)
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
frameDur = 1.0/round(expInfo['frameRate'])
else:
frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "trial"
trialClock = core.Clock()
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
sound_1 = sound.Sound(u'900', secs=-1)
sound_1.setVolume(.2)
text = visual.TextStim(win=win, ori=0, name='text',
text=u'+', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=-2.0)
# Initialize components for Routine "ITI"
ITIClock = core.Clock()
text_3 = visual.TextStim(win=win, ori=0, name='text_3',
text=u'ITI', font=u'Arial',
pos=[0, 0], height=0.1, wrapWidth=None,
color=u'white', colorSpace='rgb', opacity=1,
depth=0.0)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=100, method='random',
extraInfo=expInfo, originPath=u'/Users/patrickbissett/Desktop/DondersAuditory.psyexp',
trialList=[None],
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial.keys():
exec(paramName + '= thisTrial.' + paramName)
#------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
# update component parameters for each repeat
import random
jitter = random.randrange(50, 100)*.01
key_resp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse
key_resp_2.status = NOT_STARTED
# keep track of which components have finished
trialComponents = []
trialComponents.append(ISI)
trialComponents.append(sound_1)
trialComponents.append(text)
trialComponents.append(key_resp_2)
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "trial"-------
continueRoutine = True
while continueRoutine:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# start/stop sound_1
if t >= jitter and sound_1.status == NOT_STARTED:
# keep track of start time/frame for later
sound_1.tStart = t # underestimates by a little under one frame
sound_1.frameNStart = frameN # exact frame index
sound_1.play() # start the sound (it finishes automatically)
if sound_1.status == STARTED and t >= (jitter + (.2-win.monitorFramePeriod*0.75)): #most of one frame period left
sound_1.stop() # stop the sound (if longer than duration)
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t # underestimates by a little under one frame
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
if text.status == STARTED and t >= (0.0 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
text.setAutoDraw(False)
# *key_resp_2* updates
if t >= 0.0 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t # underestimates by a little under one frame
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
key_resp_2.clock.reset() # now t=0
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED and t >= (0.0 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['space'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
if key_resp_2.keys == []: # then this was the first keypress
key_resp_2.keys = theseKeys[0] # just the first key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(u"'space'")) or (key_resp_2.keys == u"'space'"):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# a response ends the routine
continueRoutine = False
# *ISI* period
if t >= 0.0 and ISI.status == NOT_STARTED:
# keep track of start time/frame for later
ISI.tStart = t # underestimates by a little under one frame
ISI.frameNStart = frameN # exact frame index
ISI.start(0.5)
elif ISI.status == STARTED: #one frame should pass before updating params and completing
ISI.complete() #finish the static period
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
sound_1.stop() #ensure sound has stopped at end of routine
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(u"'space'").lower() == 'none': key_resp_2.corr = 1 # correct non-response
else: key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials (TrialHandler)
trials.addData('key_resp_2.keys',key_resp_2.keys)
trials.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials.addData('key_resp_2.rt', key_resp_2.rt)
# the Routine "trial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#------Prepare to start Routine "ITI"-------
t = 0
ITIClock.reset() # clock
frameN = -1
routineTimer.add(1.000000)
# update component parameters for each repeat
trials.addData("jitterTime", jitter)
# keep track of which components have finished
ITIComponents = []
ITIComponents.append(text_3)
for thisComponent in ITIComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
#-------Start Routine "ITI"-------
continueRoutine = True
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = ITIClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_3* updates
if t >= 0.0 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t # underestimates by a little under one frame
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
if text_3.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left
text_3.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ITIComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
#-------Ending Routine "ITI"-------
for thisComponent in ITIComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.nextEntry()
# completed 100 repeats of 'trials'
win.close()
core.quit()
|
[
"[email protected]"
] | |
6b0c978d955aa41ed67d4d22e3bf81f7d1c4269f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_decorating.py
|
664550501ff25b0218fee67c62a52ded0ce21da4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
from xai.brain.wordbase.verbs._decorate import _DECORATE
# class header
class _DECORATING(_DECORATE, ):
def __init__(self,):
_DECORATE.__init__(self)
self.name = "DECORATING"
self.specie = 'verbs'
self.basic = "decorate"
self.jsondata = {}
|
[
"[email protected]"
] | |
400b1595acdef016b6a3e38e8563f17404ebaac7
|
7b315bbe8c85ce05e6c51112e985ae1b392d83f5
|
/métodos de classe e static metod/metod_staticmetod.py
|
65d27469fda2f7bcaba11e7083ced4d807d76800
|
[] |
no_license
|
Cica013/aprendendoPython
|
e9f993b1b144e294a338a53f2bc36673d3cd00a6
|
9c964f2322e3d52b39a811aceec64b169bab4e10
|
refs/heads/main
| 2023-08-10T20:12:47.640239 | 2021-10-06T21:01:19 | 2021-10-06T21:01:19 | 385,755,818 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 744 |
py
|
from random import randint
class Pessoa:
ano_atual = 2021
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
def get_ano_nascimento(self):
print(self.ano_atual - self.idade)
# Class methods do not receive the instance; they receive the class itself (see the subclass sketch after this example).
@classmethod
def por_ano_nascimento(cls, nome, ano_nascimento):
idade = cls.ano_atual - ano_nascimento
return cls(nome, idade)
# Static methods do not receive any instance (or class) at all.
@staticmethod
def gera_id():
rand = randint(10000, 19999)
return rand
p = Pessoa.por_ano_nascimento('Luiz', 1995)
print(p.nome, p.idade)
print(Pessoa.gera_id())
print(p.gera_id())
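# A minimal subclass sketch (an illustrative addition, not part of the original
# lesson): because por_ano_nascimento receives cls, it also builds subclasses.
class Aluno(Pessoa):  # hypothetical subclass for illustration
    pass

a = Aluno.por_ano_nascimento('Maria', 2000)
print(type(a).__name__, a.nome, a.idade)  # Aluno Maria 21 (with ano_atual = 2021)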
|
[
"[email protected]"
] | |
7c045e6cdaa81eee4b7557bc8cd5d770c25980d1
|
482ffa5e0848030b9327eb58215f6b626f825e5d
|
/accounts/migrations/0003_transactions.py
|
7afe9b9c6cb159569599a8e8daafa4c6def624ed
|
[] |
no_license
|
syash5/Django-Banking-App
|
6400d862a12d2fd4b555346722c756b6f4292e34
|
7dd9ed1286b61e1a508943b05b1616620fbf0118
|
refs/heads/master
| 2020-04-17T11:17:29.478938 | 2019-01-24T08:12:47 | 2019-01-24T08:12:47 | 166,534,958 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,195 |
py
|
# Generated by Django 2.1 on 2019-01-13 18:14
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
atomic = False
dependencies = [
('accounts', '0002_auto_20190112_0635'),
]
operations = [
migrations.CreateModel(
name='transactions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Diposit_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
('Withdrawal_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
('Interest_amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator('10.00')])),
('timestamp', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.UserProfile')),
],
),
]
|
[
"[email protected]"
] | |
1366562def67d68e2dab7e07c82a5d1954cd37b5
|
beb9ac9ed895b375fbea240bf7d56281d6a0a481
|
/20200715/test4.py
|
588c3c362dc327af8a8622e5ff59c276f93f66f9
|
[] |
no_license
|
MinjeongSuh88/python_workspace
|
5b0c7e2a7b3543e65df1f07066e4a52f23294ac5
|
b13afdc8cf4e42496fa2b5c8df3c5effc7f7488d
|
refs/heads/master
| 2022-11-30T11:05:52.347243 | 2020-08-14T09:04:42 | 2020-08-14T09:04:42 | 285,185,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 658 |
py
|
print('python')
# Comment: a single-line comment
a=10 # assign the value 10 to variable a
b=1+1 # assign the result of 1+1 to variable b
c=a+b # assign the sum of the values of a and b to variable c
print('--------------')
print('=================')
msg="문자열" #문자열은 싱글, 더블 쿼테이션으로 둘 다 묶을 수 있음
print(msg)
print('text')
print("text2")
print('''text''')
'''
Strings are sometimes used as comments without #, since they are not printed anyway.
They can hold explanations of the code or to-do notes,
so that later readers can tell what the code was written to do.
'''
|
[
"[email protected]"
] | |
eb9c59510c70175b05a427bf4b1f3f9c77a87a1c
|
2a6e5fb59600444dc8c4fe3b8f7054813b55a8c4
|
/pytorchvideo/models/vision_transformers.py
|
54a3154a0d17cd7d95aee67e32c4386216180177
|
[
"Apache-2.0"
] |
permissive
|
strategist922/pytorchvideo
|
92b328e2c6993fc8e23413d9164fe84fda4b3733
|
7614451e917c07271c138b5bd6d6746d4f41fbb2
|
refs/heads/master
| 2023-07-24T21:57:20.135527 | 2021-08-31T20:21:53 | 2021-08-31T20:28:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,678 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from functools import partial
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn as nn
from pytorchvideo.layers import MultiScaleBlock, SpatioTemporalClsPositionalEncoding
from pytorchvideo.layers.utils import round_width, set_attributes
from pytorchvideo.models.head import create_vit_basic_head
from pytorchvideo.models.weight_init import init_net_weights
from torch.nn.common_types import _size_3_t
from .stem import create_conv_patch_embed
class MultiscaleVisionTransformers(nn.Module):
"""
Multiscale Vision Transformers
Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li, Zhicheng Yan, Jitendra Malik,
Christoph Feichtenhofer
https://arxiv.org/abs/2104.11227
::
PatchEmbed
↓
PositionalEncoding
↓
Dropout
↓
Normalization
↓
Block 1
↓
.
.
.
↓
Block N
↓
Normalization
↓
Head
The builder can be found in `create_mvit`.
"""
def __init__(
self,
*,
patch_embed: nn.Module,
cls_positional_encoding: nn.Module,
pos_drop: Optional[nn.Module],
norm_patch_embed: Optional[nn.Module],
blocks: nn.ModuleList,
norm_embed: Optional[nn.Module],
head: Optional[nn.Module],
) -> None:
"""
Args:
patch_embed (nn.Module): Patch embed module.
cls_positional_encoding (nn.Module): Positional encoding module.
pos_drop (Optional[nn.Module]): Dropout module after patch embed.
norm_patch_embed (Optional[nn.Module]): Normalization module after patch embed.
blocks (nn.ModuleList): Stack of multi-scale transformer blocks.
norm_embed (Optional[nn.Module]): Normalization layer before the head.
head (Optional[nn.Module]): Head module.
"""
super().__init__()
set_attributes(self, locals())
assert hasattr(
cls_positional_encoding, "patch_embed_shape"
), "cls_positional_encoding should have attribute patch_embed_shape."
init_net_weights(self, init_std=0.02, style="vit")
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.cls_positional_encoding(x)
if self.pos_drop is not None:
x = self.pos_drop(x)
if self.norm_patch_embed is not None:
x = self.norm_patch_embed(x)
thw = self.cls_positional_encoding.patch_embed_shape
for blk in self.blocks:
x, thw = blk(x, thw)
if self.norm_embed is not None:
x = self.norm_embed(x)
if self.head is not None:
x = self.head(x)
return x
def create_multiscale_vision_transformers(
*,
spatial_size: int,
temporal_size: int,
cls_embed_on: bool = True,
sep_pos_embed: bool = True,
depth: int = 16,
norm: str = "layernorm",
# Patch embed config.
input_channels: int = 3,
patch_embed_dim: int = 96,
conv_patch_embed_kernel: Tuple[int] = (3, 7, 7),
conv_patch_embed_stride: Tuple[int] = (2, 4, 4),
conv_patch_embed_padding: Tuple[int] = (1, 3, 3),
enable_patch_embed_norm: bool = False,
use_2d_patch: bool = False,
# Attention block config.
num_heads: int = 1,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
dropout_rate_block: float = 0.0,
droppath_rate_block: float = 0.0,
pooling_mode: str = "conv",
pool_first: bool = False,
embed_dim_mul: Optional[List[List[int]]] = None,
atten_head_mul: Optional[List[List[int]]] = None,
pool_q_stride_size: Optional[List[List[int]]] = None,
pool_kv_stride_size: Optional[List[List[int]]] = None,
pool_kv_stride_adaptive: Optional[_size_3_t] = None,
pool_kvq_kernel: Optional[_size_3_t] = None,
# Head config.
head: Optional[Callable] = create_vit_basic_head,
head_dropout_rate: float = 0.5,
head_activation: Callable = None,
head_num_classes: int = 400,
) -> nn.Module:
"""
Build Multiscale Vision Transformers (MViT) for recognition. A Vision Transformer
(ViT) is a specific case of MViT that only uses a single scale attention block.
Args:
spatial_size (int): Input video spatial resolution. It assumes the width and
the height of the videos are the same.
temporal_size (int): Number of frames in the input video.
cls_embed_on (bool): If True, use cls embed in the model. Otherwise features
are average pooled before going to the final classifier.
sep_pos_embed (bool): If True, perform separate spatiotemporal embedding.
depth (int): The depth of the model.
norm (str): Normalization layer. It currently supports "layernorm".
input_channels (int): Channel dimension of the input video.
patch_embed_dim (int): Embedding dimension after patchifying the video input.
conv_patch_embed_kernel (Tuple[int]): Kernel size of the convolution for
patchifying the video input.
conv_patch_embed_stride (Tuple[int]): Stride size of the convolution for
patchifying the video input.
conv_patch_embed_padding (Tuple[int]): Padding size of the convolution for
patchifying the video input.
enable_patch_embed_norm (bool): If True, apply normalization after patchifying
the video input.
use_2d_patch (bool): If True, use 2D convolutions to get patch embed.
Otherwise, use 3D convolutions.
num_heads (int): Number of heads in the first transformer block.
mlp_ratio (float): Mlp ratio which controls the feature dimension in the
hidden layer of the Mlp block.
qkv_bias (bool): If set to False, the qkv layer will not learn an additive
bias. Default: True.
dropout_rate_block (float): Dropout rate for the attention block.
droppath_rate_block (float): Droppath rate for the attention block.
pooling_mode (str): Pooling mode. Option includes "conv" (learned pooling), "avg"
(average pooling), and "max" (max pooling).
pool_first (bool): If set to True, pool is applied before qkv projection.
Otherwise, pool is applied after qkv projection. Default: False.
embed_dim_mul (Optional[List[List[int]]]): Dimension multiplication at layer i.
If X is used, then the next block will increase the embed dimension by X
times. Format: [depth_i, mul_dim_ratio].
atten_head_mul (Optional[List[List[int]]]): Head dimension multiplication at
layer i. If X is used, then the next block will increase the head by
X times. Format: [depth_i, mul_dim_ratio].
pool_q_stride_size (Optional[List[List[int]]]): List of stride sizes for the
pool q at each layer. Format:
[[i, stride_t_i, stride_h_i, stride_w_i], ...,].
pool_kv_stride_size (Optional[List[List[int]]]): List of stride sizes for the
pool kv at each layer. Format:
[[i, stride_t_i, stride_h_i, stride_w_i], ...,].
pool_kv_stride_adaptive (Optional[_size_3_t]): Initial kv stride size for the
first block. The stride size will be further reduced at the layer where q
is pooled with the ratio of the stride of q pooling. If
pool_kv_stride_adaptive is set, then pool_kv_stride_size should be none.
pool_kvq_kernel (Optional[_size_3_t]): Pooling kernel size for q and kv. If None,
the kernel_size is [s + 1 if s > 1 else s for s in stride_size].
head (Callable): Head model.
head_dropout_rate (float): Dropout rate in the head.
head_activation (Callable): Activation in the head.
head_num_classes (int): Number of classes in the final classification head.
Example usage (building a MViT_B model for Kinetics400):
spatial_size = 224
temporal_size = 16
embed_dim_mul = [[1, 2.0], [3, 2.0], [14, 2.0]]
atten_head_mul = [[1, 2.0], [3, 2.0], [14, 2.0]]
pool_q_stride_size = [[1, 1, 2, 2], [3, 1, 2, 2], [14, 1, 2, 2]]
pool_kv_stride_adaptive = [1, 8, 8]
pool_kvq_kernel = [3, 3, 3]
head_num_classes = 400
MViT_B = create_multiscale_vision_transformers(
spatial_size=spatial_size,
temporal_size=temporal_size,
embed_dim_mul=embed_dim_mul,
atten_head_mul=atten_head_mul,
pool_q_stride_size=pool_q_stride_size,
pool_kv_stride_adaptive=pool_kv_stride_adaptive,
pool_kvq_kernel=pool_kvq_kernel,
head_num_classes=head_num_classes,
)
"""
if use_2d_patch:
assert temporal_size == 1, "If use_2d_patch, temporal_size needs to be 1."
if pool_kv_stride_adaptive is not None:
assert (
pool_kv_stride_size is None
), "pool_kv_stride_size should be none if pool_kv_stride_adaptive is set."
if norm == "layernorm":
norm_layer = partial(nn.LayerNorm, eps=1e-6)
else:
raise NotImplementedError("Only supports layernorm.")
conv_patch_op = nn.Conv2d if use_2d_patch else nn.Conv3d
patch_embed = create_conv_patch_embed(
in_channels=input_channels,
out_channels=patch_embed_dim,
conv_kernel_size=conv_patch_embed_kernel,
conv_stride=conv_patch_embed_stride,
conv_padding=conv_patch_embed_padding,
conv=conv_patch_op,
)
input_dims = [temporal_size, spatial_size, spatial_size]
input_stride = (
(1,) + tuple(conv_patch_embed_stride)
if use_2d_patch
else conv_patch_embed_stride
)
patch_embed_dims = [
input_dims[i] // input_stride[i] for i in range(len(input_dims))
]
cls_positional_encoding = SpatioTemporalClsPositionalEncoding(
embed_dim=patch_embed_dim,
patch_embed_shape=patch_embed_dims,
sep_pos_embed=sep_pos_embed,
has_cls=cls_embed_on,
)
dpr = [
x.item() for x in torch.linspace(0, droppath_rate_block, depth)
] # stochastic depth decay rule
pos_drop = nn.Dropout(p=dropout_rate_block) if dropout_rate_block > 0.0 else None
dim_mul, head_mul = torch.ones(depth + 1), torch.ones(depth + 1)
if embed_dim_mul is not None:
for i in range(len(embed_dim_mul)):
dim_mul[embed_dim_mul[i][0]] = embed_dim_mul[i][1]
if atten_head_mul is not None:
for i in range(len(atten_head_mul)):
head_mul[atten_head_mul[i][0]] = atten_head_mul[i][1]
norm_patch_embed = norm_layer(patch_embed_dim) if enable_patch_embed_norm else None
mvit_blocks = nn.ModuleList()
pool_q = [[] for i in range(depth)]
pool_kv = [[] for i in range(depth)]
stride_q = [[] for i in range(depth)]
stride_kv = [[] for i in range(depth)]
if pool_q_stride_size is not None:
for i in range(len(pool_q_stride_size)):
stride_q[pool_q_stride_size[i][0]] = pool_q_stride_size[i][1:]
if pool_kvq_kernel is not None:
pool_q[pool_q_stride_size[i][0]] = pool_kvq_kernel
else:
pool_q[pool_q_stride_size[i][0]] = [
s + 1 if s > 1 else s for s in pool_q_stride_size[i][1:]
]
# If POOL_KV_STRIDE_ADAPTIVE is not None, initialize POOL_KV_STRIDE.
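# Worked example (using the MViT_B numbers from the docstring, for illustration):
# with pool_kv_stride_adaptive=[1, 8, 8] and q pooled by [1, 2, 2] at layers
# 1, 3 and 14, kv is pooled by [1, 8, 8] at layer 0, [1, 4, 4] at layers 1-2,
# [1, 2, 2] at layers 3-13, and [1, 1, 1] from layer 14 on.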
if pool_kv_stride_adaptive is not None:
_stride_kv = pool_kv_stride_adaptive
pool_kv_stride_size = []
for i in range(depth):
if len(stride_q[i]) > 0:
_stride_kv = [
max(_stride_kv[d] // stride_q[i][d], 1)
for d in range(len(_stride_kv))
]
pool_kv_stride_size.append([i] + _stride_kv)
if pool_kv_stride_size is not None:
for i in range(len(pool_kv_stride_size)):
stride_kv[pool_kv_stride_size[i][0]] = pool_kv_stride_size[i][1:]
if pool_kvq_kernel is not None:
pool_kv[pool_kv_stride_size[i][0]] = pool_kvq_kernel
else:
pool_kv[pool_kv_stride_size[i][0]] = [
s + 1 if s > 1 else s for s in pool_kv_stride_size[i][1:]
]
for i in range(depth):
num_heads = round_width(num_heads, head_mul[i], min_width=1, divisor=1)
patch_embed_dim = round_width(patch_embed_dim, dim_mul[i], divisor=num_heads)
dim_out = round_width(
patch_embed_dim,
dim_mul[i + 1],
divisor=round_width(num_heads, head_mul[i + 1]),
)
mvit_blocks.append(
MultiScaleBlock(
dim=patch_embed_dim,
dim_out=dim_out,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate_block,
droppath_rate=dpr[i],
norm_layer=norm_layer,
kernel_q=pool_q[i],
kernel_kv=pool_kv[i],
stride_q=stride_q[i],
stride_kv=stride_kv[i],
pool_mode=pooling_mode,
has_cls_embed=cls_embed_on,
pool_first=pool_first,
)
)
embed_dim = dim_out
norm_embed = norm_layer(embed_dim)
if head is not None:
head_model = head(
in_features=embed_dim,
out_features=head_num_classes,
seq_pool_type="cls" if cls_embed_on else "mean",
dropout_rate=head_dropout_rate,
activation=head_activation,
)
else:
head_model = None
return MultiscaleVisionTransformers(
patch_embed=patch_embed,
cls_positional_encoding=cls_positional_encoding,
pos_drop=pos_drop,
norm_patch_embed=norm_patch_embed,
blocks=mvit_blocks,
norm_embed=norm_embed,
head=head_model,
)
|
[
"[email protected]"
] | |
7bfc60af5297c781220498970f2bba57d33b8fe6
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/120.py
|
665b598078de636f20809fca40b17315cda10277
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 581 |
py
|
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if len(triangle)==0:
return 0
if len(triangle[0])==0:
return 0
dp=len(triangle[-1])*[0]
dp[0]=triangle[0][0]
for i in range(1,len(triangle)):
dp[i]=dp[i-1]+triangle[i][-1]
for j in list(reversed(range(1,len(triangle[i])-1))):
dp[j]=min(dp[j],dp[j-1])+triangle[i][j]
dp[0]=triangle[i][0]+dp[0]
return min(dp)
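# An alternative sketch (not the original submission): the classic bottom-up DP
# over a copy of the last row avoids the reversed-index bookkeeping above.
class SolutionBottomUp(object):
    def minimumTotal(self, triangle):
        if not triangle:
            return 0
        dp = list(triangle[-1])  # start from the bottom row
        for i in range(len(triangle) - 2, -1, -1):  # walk upwards row by row
            for j in range(len(triangle[i])):
                dp[j] = min(dp[j], dp[j + 1]) + triangle[i][j]
        return dp[0]  # minimum path sum seen from the apex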
|
[
"[email protected]"
] | |
4840a6a9eb97cf1d681992490fb82a335d0548ee
|
5a25f4f5f9c7cba03f9b5848eafc01a760c88768
|
/reduction/pipeline_scripts/member.uid___A001_X1296_X1d3.hifa_calimage.casa_pipescript.py
|
225beed4b342927fd883933387885ab8b02b6a9f
|
[] |
no_license
|
ALMA-IMF/reduction
|
b3579a548fe20193b807a7415a040f351c879beb
|
de606cc6bc542f088223ce84082ff333739c9007
|
refs/heads/master
| 2023-06-22T13:21:13.841999 | 2023-06-12T09:17:50 | 2023-06-12T09:17:50 | 115,018,799 | 9 | 29 | null | 2023-06-12T09:17:51 | 2017-12-21T15:13:55 |
Python
|
UTF-8
|
Python
| false | false | 2,919 |
py
|
from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X1218271487')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_16T04_10_47.494/SOUS_uid___A001_X1296_X1cf/GOUS_uid___A001_X1296_X1d0/MOUS_uid___A001_X1296_X1d3/working/PPR_uid___A001_X1296_X1d4.xml')
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hifa_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1d3')
try:
hifa_importdata(vis=['uid___A002_Xcfae6a_Xe0b'], session=['session_1'])
fixsyscaltimes(vis = 'uid___A002_Xcfae6a_Xe0b.ms')# SACM/JAO - Fixes
h_save() # SACM/JAO - Finish weblog after fixes
h_init() # SACM/JAO - Restart weblog after fixes
hifa_importdata(vis=['uid___A002_Xcfae6a_Xe0b'], session=['session_1'])
hifa_flagdata(pipelinemode="automatic")
hifa_fluxcalflag(pipelinemode="automatic")
hif_rawflagchans(pipelinemode="automatic")
hif_refant(pipelinemode="automatic")
h_tsyscal(pipelinemode="automatic")
hifa_tsysflag(pipelinemode="automatic")
hifa_antpos(pipelinemode="automatic")
hifa_wvrgcalflag(pipelinemode="automatic")
hif_lowgainflag(pipelinemode="automatic")
hif_setmodels(pipelinemode="automatic")
hifa_bandpassflag(pipelinemode="automatic")
hifa_spwphaseup(pipelinemode="automatic")
hifa_gfluxscaleflag(pipelinemode="automatic")
hifa_gfluxscale(pipelinemode="automatic")
hifa_timegaincal(pipelinemode="automatic")
hif_applycal(pipelinemode="automatic")
hifa_imageprecheck(pipelinemode="automatic")
hif_makeimlist(intent='PHASE,BANDPASS,CHECK')
hif_makeimages(pipelinemode="automatic")
hif_checkproductsize(maxcubelimit=40.0, maxproductsize=400.0, maxcubesize=30.0)
hifa_exportdata(pipelinemode="automatic")
hif_mstransform(pipelinemode="automatic")
hifa_flagtargets(pipelinemode="automatic")
hif_makeimlist(specmode='mfs')
hif_findcont(pipelinemode="automatic")
hif_uvcontfit(pipelinemode="automatic")
hif_uvcontsub(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='cont')
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(pipelinemode="automatic")
hif_makeimages(pipelinemode="automatic")
hif_makeimlist(specmode='repBW')
hif_makeimages(pipelinemode="automatic")
finally:
h_save()
|
[
"[email protected]"
] | |
710cc7fb75b1a26eb048b1c7af7d321b631c07b9
|
b7a2a80843fa5141ffb9c7b4439f1d2ac713af30
|
/U4_5_Stadtlauf_Bern_Tupel.py
|
012d21ba32de43ce2e747f6ee615e4196af163d0
|
[] |
no_license
|
wunnox/python_grundlagen
|
df1bc2b9b1b561bd6733ccc25305e799a48e714e
|
fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0
|
refs/heads/master
| 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 |
Python
|
UTF-8
|
Python
| false | false | 6,801 |
py
|
#! env python3
####################################################
#
# Exercise:
# In the code below, the figure is supposed to walk to the Zytglogge.
# The route is stored in the tuple "zytglogge".
#
# Read this tuple and use its data to make the figure walk the route.
# For this you need the function x,y=go_walk_right(x,y)
# The texts for the sights should appear exactly as before.
#
# Available functions
#
# go_right() : takes one step to the right
# go_left() : takes one step to the left
# go_up() : takes one step up
# go_down() : takes one step down
# go_walk_right(x,y): walks right to position x,y
# go_walk_left(x,y) : walks left to position x,y
# Key q : quits the game
#
# Note: add new code only in the marked section
#
####################################################
# Modules
import pygame
import time
from stadtlauf_bern_modul import *
d=1 ## number of passes
slower=0.01 # the higher the number, the slower the figure walks
# Route data to the Zytglogge
zytglogge=((21,155), (22,155), (23,155), (24,155), (25,155), (26,155), (27,155), (28,155), (29,155), (30,154), (31,154), (32,154), (33,154), (34,154), (35,154), (36,154), (37,154), (38,154), (39,154), (40,153), (41,153), (42,153), (43,153), (44,153), (45,153), (46,153), (47,153), (48,153), (49,153), (50,152), (51,152), (52,152), (53,152), (54,152), (55,152), (56,152), (57,152), (58,152), (59,152), (60,151), (61,151), (62,151), (63,151), (64,151), (65,151), (66,151), (67,151), (68,151), (69,151), (70,150), (71,150), (72,150), (73,150), (74,150), (75,150), (76,150), (77,150), (78,150), (79,150), (80,149), (81,149), (82,149), (83,149), (84,149), (85,149), (86,149), (87,149), (88,149), (89,149), (90,148), (91,148), (92,148), (93,148), (94,148), (95,148), (96,148), (97,148), (98,148), (99,148), (100,147), (101,147), (102,147), (103,147), (104,147), (105,147), (106,147), (107,147), (108,147), (109,147), (110,146), (111,146), (112,146), (113,146), (114,146), (115,146), (116,146), (117,146), (118,146), (119,146), (120,145), (121,145), (122,145), (123,145), (124,145), (125,145), (126,145), (127,145), (128,145), (129,145), (130,144), (131,144), (132,144), (133,144), (134,144), (135,144), (136,144), (137,144), (138,144), (139,144), (140,143), (141,143), (142,143), (143,143), (144,143), (145,143), (146,143), (147,143), (148,143), (149,143), (150,142), (151,142), (152,142), (153,142), (154,142), (155,142), (156,142), (157,142), (158,142), (159,142), (160,141), (161,141), (162,141), (163,141), (164,141), (165,141), (166,141), (167,141), (168,141), (169,141), (170,140), (171,140), (172,140), (173,140), (174,140), (175,140), (176,140), (177,140), (178,140), (179,140), (180,139), (181,139), (182,139), (183,139), (184,139), (185,139), (186,139), (187,139), (188,139), (189,139), (190,138), (191,138), (192,138), (193,138), (194,138), (195,138), (196,138), (197,138), (198,138), (199,138), (200,137), (201,137), (202,137), (203,137), (204,137), (205,137), (206,137), (207,137), (208,137), (209,137), (210,136), (211,136), (212,136), (213,136), (214,136), (215,136), (216,136), (217,136), (218,136), (219,136), (220,137), (221,137), (222,137), (223,137), (224,137), (225,137), (226,137), (227,137), (228,137), (229,137), (230,138), (231,138), (232,138), (233,138), (234,138), (235,138), (236,138), (237,138), (238,138), (239,138), (240,139), (241,139), (242,139), (243,139), (244,139), (245,139), (246,139), (247,139), (248,139), (249,139), (250,140), (251,140), (252,140), (253,140), (254,140), (255,140), (256,140), (257,140), (258,140), (259,140), (260,141), (261,141), (262,141), (263,141), (264,141), (265,141), (266,141), (267,141), (268,141), (269,141), (270,142), (271,142), (272,142), (273,142), (274,142), (275,142), (276,142), (277,142), (278,142), (279,142), (280,143), (281,143), (282,143), (283,143), (284,143), (285,143), (286,143), (287,143), (288,143), (289,143), (290,144), (291,144), (292,144), (293,144), (294,144), (295,144), (296,144), (297,144), (298,144), (299,144), (300,145), (301,145), (302,145), (303,145), (304,145), (305,145), (306,145), (307,145), (308,145), (309,145), (310,146), (311,146), (312,146), (313,146), (314,146), (315,146), (316,146), (317,146), (318,146), (319,146), (320,147), (321,147), (322,147), (323,147), (324,147), (325,147), (326,147), (327,147), (328,147), (329,147), (330,148), (331,148), (332,148), (333,148), (334,148), (335,148), (336,148), (337,148), (338,148), (339,148), (340,149), (341,149), (342,149), (343,149), (344,149), (345,149), (346,149), (347,149), (348,149), (349,149), 
(350,150), (351,150), (352,150), (353,150), (354,150), (355,150), (356,150), (357,150), (358,150), (359,150), (360,151), (361,151), (362,151), (363,151), (364,151), (365,151), (366,151), (367,151), (368,151), (369,151), (370,152), (371,152), (372,152), (373,152), (374,152), (375,152), (376,152), (377,152), (378,152), (379,152), (380,153), (381,153), (382,153), (383,153), (384,153), (385,153), (386,153), (387,153), (388,153), (389,153), (390,154), (391,154), (392,154), (393,154), (394,154), (395,154), (396,154), (397,154), (398,154), (399,154), (400,155), (401,155), (402,155), (403,155), (404,155), (405,155), (406,155), (407,155), (408,155), (409,155), (410,156), (411,156), (412,156), (413,156), (414,156), (415,156), (416,156), (417,156), (418,156), (419,156), (420,157), (421,157), (422,157), (423,157), (424,157), (425,157), (426,157), (427,157), (428,157), (429,157), (430,158), (431,158), (432,158), (433,158), (434,158), (435,158), (436,158), (437,158), (438,158), (439,158), (440,159), (441,159), (442,159), (443,159), (444,159), (445,159), (446,159), (447,159), (448,159), (449,159), (450,159), (451,159), (452,159), (453,159), (454,159), (455,159), (456,159), (457,159), (458,159), (459,159), (460,159), (461,159), (462,159), (463,159), (464,159), (465,159), (466,159), (467,159), (468,159), (469,159), (470,159))
# Function
def check_text(x,y):
''' Check whether a sight description should be displayed '''
if x>200 and x<280 and y>120 and y<150:
text2show='Käfigturm'
xt,yt=x,y
elif x>410 and x<490 and y>140 and y<160:
text2show='Zytglogge'
xt,yt=x,y
else:
text2show,xt,yt='',x,y
return text2show,xt,yt
#Start Game
while run:
clock.tick(27)
if d>0:
pass
##############################################
#######################################
# Your code goes here
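# A minimal sketch of one possible solution (an assumption -- the original
# exercise leaves this section blank; it relies on go_walk_right and
# check_text behaving as described in the header):
for x, y in zytglogge:                        # walk along the stored route
    x, y = go_walk_right(x, y)                # step towards the next waypoint
    text2show, xt, yt = check_text(x, y)      # show a sight text when near one
    redrawGameWindow(text2show, xt, yt - 15)  # redraw with the current text
    time.sleep(slower)                        # pace the animation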
# up to here
#######################################
##############################################
d-=1
go_stop()
run=check_key() # check whether and which key was pressed
redrawGameWindow(text2show,xt,yt-15) # redraw the graphics
# End of display
pygame.quit()
|
[
"[email protected]"
] | |
caf4542e53284e2977880c2cb97c6a8fec09afc4
|
d2f9b928972e71b94c428db0ca9cc2ffedcf29e6
|
/ui/purchase_ui.py
|
b107f6ddced7d67f12042c337793ce0c52bba997
|
[] |
no_license
|
XHApplet/GoodsBook
|
e69be00ab9dfe7746f6f7f9264828ef20a20bf64
|
811bf2e0d2a0158a7a989cbe6abe4b106ecc5b81
|
refs/heads/master
| 2022-10-30T16:22:33.372798 | 2019-04-12T06:20:42 | 2019-04-12T06:20:42 | 273,460,839 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,305 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\mycode\GoodsBook\ui\purchase.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1056, 506)
self.gridLayout_2 = QtWidgets.QGridLayout(Form)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout.setContentsMargins(30, 20, 30, 20)
self.gridLayout.setSpacing(20)
self.gridLayout.setObjectName("gridLayout")
self.pushButtonInput = QtWidgets.QPushButton(Form)
self.pushButtonInput.setEnabled(True)
self.pushButtonInput.setMinimumSize(QtCore.QSize(0, 50))
self.pushButtonInput.setObjectName("pushButtonInput")
self.gridLayout.addWidget(self.pushButtonInput, 3, 4, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 0, 0, 1, 1)
self.lineEditInputPrice = QtWidgets.QLineEdit(Form)
self.lineEditInputPrice.setMinimumSize(QtCore.QSize(0, 50))
self.lineEditInputPrice.setInputMask("")
self.lineEditInputPrice.setMaxLength(15)
self.lineEditInputPrice.setAlignment(QtCore.Qt.AlignCenter)
self.lineEditInputPrice.setObjectName("lineEditInputPrice")
self.gridLayout.addWidget(self.lineEditInputPrice, 2, 3, 1, 1)
self.lineEditInputNum = QtWidgets.QLineEdit(Form)
self.lineEditInputNum.setMinimumSize(QtCore.QSize(0, 50))
self.lineEditInputNum.setMaxLength(10)
self.lineEditInputNum.setAlignment(QtCore.Qt.AlignCenter)
self.lineEditInputNum.setObjectName("lineEditInputNum")
self.gridLayout.addWidget(self.lineEditInputNum, 2, 4, 1, 1)
self.dateEditInput = CCustomDateEdit(Form)
self.dateEditInput.setMinimumSize(QtCore.QSize(0, 50))
self.dateEditInput.setLayoutDirection(QtCore.Qt.LeftToRight)
self.dateEditInput.setAlignment(QtCore.Qt.AlignCenter)
self.dateEditInput.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
self.dateEditInput.setCorrectionMode(QtWidgets.QAbstractSpinBox.CorrectToPreviousValue)
self.dateEditInput.setDateTime(QtCore.QDateTime(QtCore.QDate(2000, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEditInput.setTime(QtCore.QTime(0, 0, 0))
self.dateEditInput.setCurrentSection(QtWidgets.QDateTimeEdit.YearSection)
self.dateEditInput.setCalendarPopup(True)
self.dateEditInput.setObjectName("dateEditInput")
self.gridLayout.addWidget(self.dateEditInput, 2, 0, 1, 1)
self.comboBoxInputGoods = ExtendedComboBox(Form)
self.comboBoxInputGoods.setMinimumSize(QtCore.QSize(0, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.comboBoxInputGoods.setFont(font)
self.comboBoxInputGoods.setObjectName("comboBoxInputGoods")
self.gridLayout.addWidget(self.comboBoxInputGoods, 2, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 3, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(30)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 3, 1, 1)
self.label = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(40)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.RightToLeft)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.lineEditInputRemark = QtWidgets.QLineEdit(Form)
self.lineEditInputRemark.setMinimumSize(QtCore.QSize(0, 50))
self.lineEditInputRemark.setObjectName("lineEditInputRemark")
self.gridLayout.addWidget(self.lineEditInputRemark, 2, 5, 1, 1)
self.label_5 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(30)
self.label_5.setFont(font)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 5, 1, 1)
self.label_2 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(30)
self.label_2.setFont(font)
self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 2, 1, 1)
self.comboBoxInputType = ExtendedComboBox(Form)
self.comboBoxInputType.setMinimumSize(QtCore.QSize(0, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.comboBoxInputType.setFont(font)
self.comboBoxInputType.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
self.comboBoxInputType.setObjectName("comboBoxInputType")
self.gridLayout.addWidget(self.comboBoxInputType, 2, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(30)
self.label_13.setFont(font)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(30)
self.label_4.setFont(font)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 1, 4, 1, 1)
self.InputTiplabel = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(15)
self.InputTiplabel.setFont(font)
self.InputTiplabel.setStyleSheet("color:rgb(255, 0, 0)")
self.InputTiplabel.setAlignment(QtCore.Qt.AlignCenter)
self.InputTiplabel.setObjectName("InputTiplabel")
self.gridLayout.addWidget(self.InputTiplabel, 0, 2, 1, 1)
self.labelAmount = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.labelAmount.setFont(font)
self.labelAmount.setStyleSheet("color:rgb(0, 0, 255)")
self.labelAmount.setObjectName("labelAmount")
self.gridLayout.addWidget(self.labelAmount, 0, 4, 1, 1)
self.gridLayout.setColumnMinimumWidth(0, 1)
self.gridLayout.setColumnMinimumWidth(1, 2)
self.gridLayout.setColumnMinimumWidth(2, 3)
self.gridLayout.setColumnMinimumWidth(3, 4)
self.gridLayout.setColumnMinimumWidth(4, 5)
self.gridLayout.setColumnMinimumWidth(5, 6)
self.gridLayout.setRowMinimumHeight(0, 1)
self.gridLayout.setRowMinimumHeight(1, 2)
self.gridLayout.setRowMinimumHeight(2, 3)
self.gridLayout.setRowMinimumHeight(3, 4)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 2)
self.gridLayout.setColumnStretch(2, 4)
self.gridLayout.setColumnStretch(3, 2)
self.gridLayout.setColumnStretch(4, 2)
self.gridLayout.setColumnStretch(5, 6)
self.gridLayout.setRowStretch(0, 1)
self.gridLayout.setRowStretch(1, 2)
self.gridLayout.setRowStretch(2, 3)
self.gridLayout.setRowStretch(3, 2)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
Form.setTabOrder(self.dateEditInput, self.comboBoxInputType)
Form.setTabOrder(self.comboBoxInputType, self.comboBoxInputGoods)
Form.setTabOrder(self.comboBoxInputGoods, self.lineEditInputPrice)
Form.setTabOrder(self.lineEditInputPrice, self.lineEditInputNum)
Form.setTabOrder(self.lineEditInputNum, self.lineEditInputRemark)
Form.setTabOrder(self.lineEditInputRemark, self.pushButtonInput)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.pushButtonInput.setText(_translate("Form", "确认进货"))
self.dateEditInput.setDisplayFormat(_translate("Form", "yyyy/M/d"))
self.label_3.setText(_translate("Form", "价格"))
self.label.setText(_translate("Form", "日期"))
self.label_5.setText(_translate("Form", "备注"))
self.label_2.setText(_translate("Form", "商品"))
self.label_13.setText(_translate("Form", "类别"))
self.label_4.setText(_translate("Form", "数量"))
self.InputTiplabel.setText(_translate("Form", "注意:新商品录入"))
self.labelAmount.setText(_translate("Form", "总价:"))
from lib.pubui import CCustomDateEdit, ExtendedComboBox
|
[
"[email protected]"
] | |
dd760e6f9b4ff2ec08119811edbb68ae2a2b08b9
|
f22d31484a12d001826c1775a6f2d245a720fce8
|
/Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 11/11.25 - Acessando um campo do tipo data.py
|
0501a6557cd74191d14b35613f9ec6f3f88ff792
|
[] |
no_license
|
eduardoprograma/linguagem_Python
|
9eb55f0a5a432a986e047b091eb7ed7152b7da67
|
942aba9146800fc33bbea98778467f837396cb93
|
refs/heads/master
| 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,007 |
py
|
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2017
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# First reprint - second edition - May/2015
# Second reprint - second edition - January/2016
# Third reprint - second edition - June/2016
# Fourth reprint - second edition - March/2017
#
# Site: http://python.nilo.pro.br/
#
# File: listagem\capitulo 11\11.25 - Acessando um campo do tipo data.py
##############################################################################
import sqlite3
with sqlite3.connect("brasil.db") as conexão:
for feriado in conexão.execute("select * from feriados"):
print(feriado)
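# A hedged follow-up sketch (assumes the date column of "feriados" is declared
# with SQL type DATE): with detect_types, sqlite3 returns datetime.date objects
# instead of plain strings, which is the point of "accessing a date-type field".
import sqlite3

with sqlite3.connect("brasil.db",
                     detect_types=sqlite3.PARSE_DECLTYPES) as conexão:
    for feriado in conexão.execute("select * from feriados"):
        print(feriado)  # date columns now arrive as datetime.date objects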
|
[
"[email protected]"
] | |
6f7fae7437a7d13832deec4352974a08fee85933
|
effdd4579ce829f0965e59d3282504ccdca3278e
|
/apps/users/models.py
|
a7e464a9005dd9491e878679604a3b0ee1e071fe
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Liuyanzhi/OnlineMooc
|
55cad27d8f2168dd6a18f850b923d6c866024c24
|
88e49e0bd8ab3002c3150b6ad8bd2a8ef7b6deb8
|
refs/heads/master
| 2022-11-29T12:06:02.193585 | 2020-07-29T15:30:40 | 2020-07-29T15:30:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,352 |
py
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
# Custom gender choices
GENDER_CHOICES = (
("male", u"男"),
("female", u"女")
)
# Nickname
nick_name = models.CharField(max_length=50, verbose_name=u"昵称", default="")
# Birthday, may be empty
birthday = models.DateField(verbose_name=u"生日", null=True, blank=True)
# Gender: male or female only, defaults to female
gender = models.CharField(
max_length=6,
verbose_name=u"性别",
choices=GENDER_CHOICES,
default="female")
# Address
address = models.CharField(max_length=100, verbose_name="地址", default="")
# Phone number
mobile = models.CharField(
max_length=11,
null=True,
blank=True,
verbose_name=u"电话")
# Avatar, uses default.png by default
image = models.ImageField(
upload_to="image/%Y/%m",
default=u"image/default.png",
max_length=100,
verbose_name=u"头像"
)
# Meta info, i.e. the section name shown in the admin backend
class Meta:
verbose_name = "用户信息"
verbose_name_plural = verbose_name
# Override __str__ so printing an instance prints username (inherited from AbstractUser)
def __str__(self):
return self.username
# Get the number of the user's unread messages
def unread_nums(self):
from operation.models import UserMessage
return UserMessage.objects.filter(has_read=False, user=self.id).count()
class EmailVerifyRecord(models.Model):
"""邮箱验证码model"""
SEND_CHOICES = (
("register", u"注册"),
("forget", u"找回密码"),
("update_email", u"修改邮箱"),
)
code = models.CharField(max_length=20, verbose_name=u"验证码")
# null=True / blank=True not set, so the field cannot be empty by default
email = models.EmailField(max_length=50, verbose_name=u"邮箱")
send_type = models.CharField(
choices=SEND_CHOICES,
max_length=20,
verbose_name=u"验证码类型")
# Use datetime.now without parentheses here; with (), the timestamp would be fixed at import time rather than at instantiation time.
send_time = models.DateTimeField(
default=datetime.now, verbose_name=u"发送时间")
class Meta:
verbose_name = "邮箱验证码"
verbose_name_plural = verbose_name
# Override __str__ so the admin backend does not just show "object"
def __str__(self):
return '{0}({1})'.format(self.code, self.email)
class Banner(models.Model):
"""轮播图model"""
title = models.CharField(max_length=100, verbose_name=u"标题")
image = models.ImageField(
upload_to="banner/%Y/%m",
verbose_name=u"轮播图",
max_length=100)
url = models.URLField(max_length=200, verbose_name=u"访问地址")
# The large default index puts a banner near the end; lower the index to move it forward.
index = models.IntegerField(default=100, verbose_name=u"顺序")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"轮播图"
verbose_name_plural = verbose_name
# Override __str__ so the admin backend does not just show "object"
def __str__(self):
return '{0}(位于第{1}位)'.format(self.title, self.index)
|
[
"[email protected]"
] | |
274b7e630d7bb6048b374afa3e25f1d914cc5f4f
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/deployment_extended_py3.py
|
095e187951cbf55221db70bf9c50748087ff0832
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 1,569 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentExtended(Model):
"""Deployment information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The ID of the deployment.
:vartype id: str
:param name: Required. The name of the deployment.
:type name: str
:param properties: Deployment properties.
:type properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
"""
_validation = {
'id': {'readonly': True},
'name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentPropertiesExtended'},
}
def __init__(self, *, name: str, properties=None, **kwargs) -> None:
super(DeploymentExtended, self).__init__(**kwargs)
self.id = None
self.name = name
self.properties = properties
|
[
"[email protected]"
] | |
24bb14de8d57979dddcf4ba16b0a388f44b51dc0
|
27fd33abe12f48b0cfdafef7a624e4a96b311744
|
/stock_invoice_chained_delivery/__openerp__.py
|
520dac5979944730e9f09fa6a13a0d84e2876e7f
|
[] |
no_license
|
mgielissen/julius-openobject-addons
|
7b9966ed2894ce82b5fb396bca1bd9984f263737
|
3e35f7ba7246c54e5a5b31921b28aa5f1ab24999
|
refs/heads/master
| 2021-01-16T20:41:49.131160 | 2016-03-01T12:31:05 | 2016-03-01T12:31:05 | 52,984,093 | 1 | 0 | null | 2016-03-02T17:53:42 | 2016-03-02T17:53:42 | null |
UTF-8
|
Python
| false | false | 1,456 |
py
|
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Stock Picking invoice the chained delivery",
"version" : "0.1",
"author" : "Julius Network Solutions",
"website" : "http://julius.fr",
"category" : "Warehouse Management",
"depends" : [
"stock",
],
"description": """
Stock Picking invoice the chained delivery.
""",
"demo" : [],
"data" : [],
'installable' : False,
'active' : False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"[email protected]"
] | |
302aad1cf1976fdcf53cdcf52935b5bf7730306f
|
a3f4fc66a64aaf359fbc7176b3b2335db73c5dd9
|
/features/environment.py
|
821222beae4c53daad3f8e45a39a3bed7f4531f2
|
[] |
no_license
|
hpifu/go-cloud
|
cd6b13fe0f757a5d961f8be15cbc3844d6d5282f
|
fd4edabddca679c19c887c8a019e1a1ecce02e0b
|
refs/heads/master
| 2021-08-02T09:21:05.245600 | 2020-01-03T13:07:28 | 2020-01-03T13:07:28 | 203,243,843 | 0 | 0 | null | 2021-07-26T23:44:47 | 2019-08-19T20:16:13 |
Go
|
UTF-8
|
Python
| false | false | 3,043 |
py
|
#!/usr/bin/env python3
import pymysql
import redis
import subprocess
import time
import requests
import datetime
import json
import socket
from behave import *
register_type(int=int)
register_type(str=lambda x: x if x != "N/A" else "")
register_type(bool=lambda x: True if x == "true" else False)
config = {
"prefix": "output/go-cloud",
"service": {
"port": 16061,
"cookieSecure": False,
"allowOrigins": ["http://127.0.0.1:4000"],
"cookieDomain": "127.0.0.1"
},
"es": {
"uri": "http://test-elasticsearch:9200"
},
"account": {
"address": "test-go-account:16060",
"maxConn": 20,
"connTimeout": "200ms",
"recvtimeout": "200ms"
},
"mysqldb": {
"host": "test-mysql",
"port": 3306,
"user": "hatlonely",
"password": "keaiduo1",
"db": "hads"
},
"redis": {
"host": "test-redis",
"port": 6379
}
}
def wait_for_port(port, host="localhost", timeout=5.0):
start_time = time.perf_counter()
while True:
try:
with socket.create_connection((host, port), timeout=timeout):
break
except OSError as ex:
time.sleep(0.01)
if time.perf_counter() - start_time >= timeout:
raise TimeoutError("Waited too long for the port {} on host {} to start accepting connections.".format(
port, host
)) from ex
def deploy():
fp = open("{}/configs/cloud.json".format(config["prefix"]))
cf = json.loads(fp.read())
fp.close()
cf["service"]["port"] = ":{}".format(config["service"]["port"])
cf["service"]["cookieSecure"] = config["service"]["cookieSecure"]
cf["service"]["cookieDomain"] = config["service"]["cookieDomain"]
cf["service"]["allowOrigins"] = config["service"]["allowOrigins"]
cf["account"]["address"] = config["account"]["address"]
cf["es"]["uri"] = config["es"]["uri"]
print(cf)
fp = open("{}/configs/cloud.json".format(config["prefix"]), "w")
fp.write(json.dumps(cf, indent=4))
fp.close()
def start():
subprocess.Popen(
"cd {} && nohup bin/cloud &".format(config["prefix"]), shell=True
)
wait_for_port(config["service"]["port"], timeout=5)
def stop():
subprocess.getstatusoutput(
"ps aux | grep bin/cloud | grep -v grep | awk '{print $2}' | xargs kill"
)
def before_all(context):
config["url"] = "http://127.0.0.1:{}".format(config["service"]["port"])
deploy()
start()
context.config = config
context.mysql_conn = pymysql.connect(
host=config["mysqldb"]["host"],
user=config["mysqldb"]["user"],
port=config["mysqldb"]["port"],
password=config["mysqldb"]["password"],
db=config["mysqldb"]["db"],
charset="utf8",
cursorclass=pymysql.cursors.DictCursor
)
context.redis_client = redis.Redis(
config["redis"]["host"], port=6379, db=0
)
def after_all(context):
stop()
|
[
"[email protected]"
] | |
2b775a08b758b5a4abcdf87c7a14a1b6965a4cdc
|
22bec7f5d2c2bc9b6fa0c23d6733bf6d81d41982
|
/src/스택_큐/Level_2_주식가격.py
|
7f564eab4d0a16ee36b6b487f386b67870da5a90
|
[
"MIT"
] |
permissive
|
taki0112/coding_interview
|
3cb5eeb5b545cc15de84551370923e307a93293d
|
06b61646c3fafb63ac74b1170a4d0a77f02231a0
|
refs/heads/master
| 2021-08-07T08:09:13.392815 | 2020-04-22T10:11:52 | 2020-04-22T10:11:52 | 158,521,986 | 7 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
def solution(prices):
answer = []
for i in range(len(prices) - 1) :
for j in range(i+1, len(prices)) :
if prices[i] > prices[j] :
answer.append(j - i)
break
else :
answer.append(len(prices) - 1 - i)
else :
answer.append(0)
return answer
prices = [498,501,470,489]
x = solution(prices)
print(x)
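# A hedged alternative sketch fitting the folder topic (stack/queue): an O(n)
# stack-based version that records, for each price, how long it stays until a drop.
def solution_stack(prices):
    answer = [0] * len(prices)
    stack = []  # indices of prices still waiting for a lower price
    for j, price in enumerate(prices):
        while stack and prices[stack[-1]] > price:
            i = stack.pop()
            answer[i] = j - i            # price i dropped at time j
        stack.append(j)
    for i in stack:                      # never dropped: hold until the end
        answer[i] = len(prices) - 1 - i
    return answer

print(solution_stack(prices))  # expected to match solution(prices)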
|
[
"[email protected]"
] | |
d6af03603aafba2f633ce7da7e0c01172f366049
|
59199c7e4e8a829fc7bce0774e2a3c9b62a68c39
|
/casa_reduction_pipeline/casa_sma_reduction_solsyssetjy_with_quicklooksma.py
|
519ec2dc7ab773d7afab1cb505b100c478e3c979
|
[] |
no_license
|
Smithsonian/sma-data-reduction
|
379a9913b07e1fb067d70cb79c6ac7245d5bd781
|
9580e2464ad36ab7cd11e487bb1fdf296479f3fb
|
refs/heads/main
| 2023-09-01T21:51:04.439609 | 2023-08-09T22:00:02 | 2023-08-09T22:00:02 | 345,712,396 | 7 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,100 |
py
|
# Based on Todd Hunter's CASA calibration script, modified by Charlie Qi 05/2021
# The measurement set is generated with CASA 5.7.2-4.
import os
import numpy as np
import sys
from datetime import datetime
# Quicklook SMA export functions:
from quicklook_sma.utilities import read_config
# Additional QA plotting routines
from quicklook_sma import make_qa_tables, make_all_caltable_txt
# Quicklook imaging
from quicklook_sma.quicklook_imaging import quicklook_continuum_imaging
# CASA 6
from casatasks import (listobs, clearcal, flagmanager, flagdata,
setjy, bandpass, gaincal, applycal, blcal, fluxscale)
from casaplotms import plotms
from casatools import table, msmetadata
from casatools import logsink
casalog = logsink()
this_date = datetime.now()
config_filename = sys.argv[-1]
if not os.path.exists(config_filename):
# raise ValueError(f"Cannot fine config filename {config_filename}")
raise ValueError("Cannot fine config filename {0}".format(config_filename))
import configparser
config_file = configparser.ConfigParser()
config_file.read(config_filename)
sma_pipe_config = config_file
###################
myvis = sma_pipe_config.get('SMA-Pipe', 'myvis')
###################
manual_flag_file = sma_pipe_config.get('SMA-Pipe', 'manual_flag_file')
# Set to None if not filename is given
if len(manual_flag_file) == 0:
manual_flag_file = None
else:
if not os.path.exists(manual_flag_file):
raise ValueError(f"Cannot find the specified flagging file {manual_flag_file}")
# Reset the corrected column when re-running the script.
restart_pipeline = sma_pipe_config.getboolean('SMA-Pipe', 'restart_pipeline')
# Interactive run
interactive_on = sma_pipe_config.getboolean('SMA-Pipe', 'interactive_on')
flux = sma_pipe_config.get('SMA-Pipe', 'flux')
# This script will use the SS models and so doesn't need the manually
# set brightness.
# flux_stokesI = float(sma_pipe_config.get('SMA-Pipe', 'flux_stokesI'))
bpcal = sma_pipe_config.get('SMA-Pipe', 'bpcal')
pcal1 = sma_pipe_config.get('SMA-Pipe', 'pcal1')
pcal2 = sma_pipe_config.get('SMA-Pipe', 'pcal2')
science_fields = sma_pipe_config.get('SMA-Pipe', 'science_fields')
is_mosaic = sma_pipe_config.getboolean('SMA-Pipe', 'is_mosaic')
if len(pcal2) > 0:
bothpcal = ",".join([pcal1, pcal2])
else:
bothpcal= pcal1
if flux == bpcal or flux in bothpcal:
calfields= ",".join([bpcal, bothpcal])
else:
calfields= ",".join([bpcal, bothpcal, flux])
if is_mosaic:
# Find all matching target names
tb.open("{0}/FIELD".format(myvis))
field_names = tb.getcol('NAME')
tb.close()
science_match = science_fields.strip("*")
science_field_list = []
for field in field_names:
if science_match in field:
science_field_list.append(field)
science_fields = ",".join(science_field_list)
bpchans = sma_pipe_config.get('SMA-Pipe', 'bpchans')
calchans = sma_pipe_config.get('SMA-Pipe', 'calchans')
spwrange = sma_pipe_config.get('SMA-Pipe', 'spwrange')
nspws = int(sma_pipe_config.get('SMA-Pipe', 'nspws'))
bpscan = sma_pipe_config.get('SMA-Pipe', 'bpscan')
rechunk = int(sma_pipe_config.get('SMA-Pipe', 'rechunk'))
edgechan_frac = float(sma_pipe_config.get('SMA-Pipe', 'edgechan_frac'))
chan_num = int(16384 / rechunk)
edge_chan_low = int(np.floor(chan_num * edgechan_frac))
edge_chan_high = int(np.floor(chan_num * (1. - edgechan_frac)))
# Flag edges of the SPWs
edgechan = "{0}:0~{1};{2}~{3}".format(spwrange, edge_chan_low,
edge_chan_high, chan_num-1)
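# Worked example (illustrative numbers only, not from a real config): with
# rechunk=8 and edgechan_frac=0.05, chan_num = 16384 / 8 = 2048,
# edge_chan_low = floor(2048 * 0.05) = 102 and edge_chan_high = 1945, so
# edgechan flags channels 0~102 and 1945~2047 in every selected spw.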
try:
extra_chunk56_edgeflag = sma_pipe_config.getboolean('SMA-Pipe',
'extra_chunk56_edgeflag')
except Exception:
extra_chunk56_edgeflag = False
# Add a bit of extra flagging between chunks 5/6 where there's a steeper drop in sensitivity
if extra_chunk56_edgeflag:
# add an extra ~12% flagging between those two chunks only.
edge_frac_extra = 0.12
edge_chan_high_56 = int(np.floor(chan_num * (1. - edge_frac_extra)))
# This accounts for the reversal in channel vs frequency ordering
chunk56_lsb = f"0~1:{edge_chan_high_56}~{chan_num-1}"
chunk56_usb = f"10~11:{edge_chan_high_56}~{chan_num-1}"
# Add these to the edgechan selection
edgechan += f", {chunk56_lsb}, {chunk56_usb}"
refant = sma_pipe_config.get('SMA-Pipe', 'refant')
bpsolint= sma_pipe_config.get('SMA-Pipe', 'bpsolint')
# Minimum time for solution intervals.
# Defaults to 30s (the normal SMA integration time)
try:
min_solint = sma_pipe_config.get('SMA-Pipe', 'min_solint')
except Exception:
min_solint = '30s'
###############
# Setup output directories
###############
caltab_plot_path = sma_pipe_config.get('SMA-Pipe', 'caltab_plot_path')
if not os.path.exists(caltab_plot_path):
os.mkdir(caltab_plot_path)
plot_path = sma_pipe_config.get('SMA-Pipe', 'plot_path')
if not os.path.exists(plot_path):
os.mkdir(plot_path)
scitarget_plot_path = sma_pipe_config.get('SMA-Pipe', 'scitarget_plot_path')
if not os.path.exists(scitarget_plot_path):
os.mkdir(scitarget_plot_path)
###############
# Restart and clear previous calibration/model columns
# Restore original flags.
###############
if restart_pipeline:
# Overwrite the CORRECTED column.
clearcal(vis=myvis)
try:
flagmanager(vis=myvis, mode='restore', versionname='original')
except RuntimeError:
casalog.post("Unable to find original flag version")
print("Unable to find original flag version")
else:
# Backup a version of the original flags
flagversions = flagmanager(vis=myvis, mode='list')
flagversion_names = [flagversions[key]['name']
for key in flagversions.keys()
if key != "MS"]
if "original" not in flagversion_names:
flagmanager(vis=myvis, mode='save', versionname='original')
###############
# List MS file contents
###############
listobs(myvis)
###############
# Edge and birdie flagging
###############
# Ensure zeros are flagged
flagdata(vis=myvis, mode='clip', clipzeros=True, flagbackup=False)
# Flag edges and birdies
flagdata(vis=myvis, mode='manual', spw=edgechan, flagbackup=False)
flagmanager(vis=myvis, mode='save', versionname='flag_edges')
# Use plotms to view the bright quasars interactively.
# We are looking for large outliers in either channels or time.
# Read in flag commands from a flagging txt file
if manual_flag_file is not None:
flagdata(vis=myvis, mode='list',
inpfile=manual_flag_file,
flagbackup=False)
flagmanager(vis=myvis, mode='save', versionname='manual_flagging')
else:
print("No manual flagging file given.")
# From here, apply flags. Then replot to check that we caught all the spikes.
if interactive_on:
# First look for outliers in amp vs channel
for myfield in list(set(calfields.split(","))):
plotms(vis=myvis, xaxis='channel',
yaxis='amp',field=myfield, avgtime='1e8', avgscan=False,
coloraxis='ant1',iteraxis='spw', ydatacolumn='data',
gridrows=4, gridcols=3, yselfscale=True, showgui=True)
# input(f"Done adding freq. flags for {myfield}?")
input("Done adding freq. flags for {0}?".format(myfield))
# Next look for time outliers
for myfield in list(set(calfields.split(","))):
plotms(vis=myvis, xaxis='time',
yaxis='amp',field=myfield, avgchannel='1e8',
coloraxis='ant1',iteraxis='spw', ydatacolumn='data',
gridrows=4, gridcols=3, yselfscale=True, showgui=True)
# input(f"Done adding time flags for {myfield}?")
input("Done adding time flags for {0}?".format(myfield))
# input("Stop here to run new flagging commands.")
input("Stop here to run new flagging commands.")
print("Exiting after interactive examination. Re-run with interactive_on=False "
"and set the manual_flagging_file in the config file to run the pipeline.")
sys.exit(0)
###############
# PRIORCALS
###############
# Check for an antenna pos correction table:
antpos_table = f"{myvis[:-3]}.antpos"
if os.path.exists(antpos_table):
priorcals = [antpos_table]
else:
priorcals = []
###############
# SETJY
###############
setjy(vis=myvis, field=flux, spw='',
scalebychan=True,
standard='Butler-JPL-Horizons 2012',
usescratch=False)
###############
# BANDPASS
###############
# phase-only selfcal
phaseshortgaincal_table = '{0}.bpself.gcal'.format(myvis)
if os.path.exists(phaseshortgaincal_table):
os.system(f'rm -rf {phaseshortgaincal_table}')
gaincal(vis=myvis,caltable=phaseshortgaincal_table,
field=bpcal,spw=bpchans,refant=refant, scan=bpscan,
calmode='p',solint=min_solint,minsnr=2.0, minblperant=3,
gaintable=priorcals)
plotms(vis=phaseshortgaincal_table,
xaxis='time',
yaxis='phase',
coloraxis='spw',
iteraxis='antenna',
ydatacolumn='data',
plotfile=f"{caltab_plot_path}/{phaseshortgaincal_table}.png",
gridrows=3, gridcols=3,
yselfscale=True,
xconnector='line', timeconnector=True,
showgui=False, overwrite=True, dpi=400)
# Solnorm True bandpass
bandpass_table = '{0}.bandpass.solnorm_true.bcal'.format(myvis)
if os.path.exists(bandpass_table):
os.system(f'rm -rf {bandpass_table}')
# smooth some channels CHECK SMOOTHING WINDOW
bandpass(vis=myvis,caltable=bandpass_table,
bandtype='B', scan=bpscan,
field=bpcal, spw=spwrange,
combine='scan,field',
refant=refant,
solint=bpsolint, solnorm=True, minblperant=3,
fillgaps=10, # If some channels are flagged above, interpolate over in the BP
gaintable=[phaseshortgaincal_table] + priorcals)
# Plot bandpass phase and amplitude solutions.
plotms(vis=bandpass_table,xaxis='freq',
yaxis='phase',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
plotfile=f"{caltab_plot_path}/{bandpass_table}.phase.png",
gridrows=3, gridcols=3,
yselfscale=True,
showgui=False, overwrite=True, dpi=400)
plotms(vis=bandpass_table,xaxis='freq',
yaxis='amp',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
plotfile=f"{caltab_plot_path}/{bandpass_table}.amp.png",
gridrows=3, gridcols=3,
yselfscale=True,
showgui=False, overwrite=True, dpi=400)
# Make ap selfcal table applying the bandpass
ampphaseshortgaincal_table = '{0}.bpself.ap.gcal'.format(myvis)
if os.path.exists(ampphaseshortgaincal_table):
os.system(f'rm -rf {ampphaseshortgaincal_table}')
gaincal(vis=myvis, caltable=ampphaseshortgaincal_table,
field=bpcal, spw=bpchans, refant=refant, scan=bpscan,
calmode='ap', solint=min_solint, minsnr=2.0, minblperant=3,
gaintable=[bandpass_table] + priorcals)
plotms(vis=ampphaseshortgaincal_table,
xaxis='time',
yaxis='phase',
coloraxis='spw',
iteraxis='antenna',
ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{ampphaseshortgaincal_table}.png",
gridrows=3, gridcols=3,
yselfscale=True,
showgui=False, overwrite=True, dpi=400)
plotms(vis=ampphaseshortgaincal_table,
xaxis='time',
yaxis='amp',
coloraxis='spw',
iteraxis='antenna',
ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{ampphaseshortgaincal_table}.png",
gridrows=3, gridcols=3,
yselfscale=True,
showgui=False, overwrite=True, dpi=400)
###############
# GAINS
###############
# per-int phase with combining spws across sidebands.
gain_phase_int_table = '{0}.intphase_combinespw.gcal'.format(myvis)
if os.path.exists(gain_phase_int_table):
os.system(f'rm -rf {gain_phase_int_table}')
# Solve per sideband
# LSB
gaincal(vis=myvis,caltable=gain_phase_int_table,
field=calfields,refant=refant,
combine='spw',spw='0~5',
calmode='p',solint=min_solint,minsnr=2.0,minblperant=3,
gaintable=[bandpass_table] + priorcals)
# USB
gaincal(vis=myvis,caltable=gain_phase_int_table, append=True,
field=calfields, refant=refant,
combine='spw',spw='6~11',
calmode='p',solint=min_solint,minsnr=2.0,minblperant=3,
gaintable=[bandpass_table] + priorcals)
plotms(vis=gain_phase_int_table,xaxis='time',
yaxis='phase',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_phase_int_table}.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
# per-scan phase with combining spws
gain_phase_scan_table = '{0}.scanphase_combinespw.gcal'.format(myvis)
if os.path.exists(gain_phase_scan_table):
os.system(f'rm -rf {gain_phase_scan_table}')
# Solve per sideband
# LSB
gaincal(vis=myvis,caltable=gain_phase_scan_table,
field=calfields,refant=refant,
combine='spw',spw='0~5',
calmode='p',solint='300s',minsnr=2.0,minblperant=3,
gaintable=[bandpass_table] + priorcals)
# USB
gaincal(vis=myvis,caltable=gain_phase_scan_table,append=True,
field=calfields,refant=refant,
combine='spw',spw='6~11',
calmode='p',solint='300s',minsnr=2.0,minblperant=3,
gaintable=[bandpass_table] + priorcals)
plotms(vis=gain_phase_scan_table,xaxis='time',
yaxis='phase',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_phase_scan_table}.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
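# Because the phase solutions above are solved with combine='spw'
# within each sideband, the downstream gaincal/applycal calls remap
# them with spwmap=[0,0,0,0,0,0,6,6,6,6,6,6]: spw 0's solution serves
# spws 0-5 (LSB) and spw 6's serves spws 6-11 (USB).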
# Amplitude gain calibration
gain_amp_scan_table = '{0}.amp.gcal'.format(myvis)
if os.path.exists(gain_amp_scan_table):
os.system(f'rm -rf {gain_amp_scan_table}')
gaincal(vis=myvis,caltable=gain_amp_scan_table,
field=calfields,spw=calchans,refant=refant,
combine='',
spwmap=[[],[0,0,0,0,0,0,6,6,6,6,6,6]],
calmode='a', solint='10min', minsnr=3.0, minblperant=3,
gaintable=[bandpass_table,
gain_phase_int_table] + priorcals)
plotms(vis=gain_amp_scan_table,xaxis='time',
yaxis='amp',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_amp_scan_table}_time.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
plotms(vis=gain_amp_scan_table,
xaxis='freq', yaxis='amp',
coloraxis='spw',iteraxis='antenna', ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_amp_scan_table}_freq.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
# Bootstrap fluxes from our flux cal (see setjy above)
# to the gain calibrators
fluxboot_table = '{0}.flux.cal'.format(myvis)
if os.path.exists(fluxboot_table):
os.system(f'rm -rf {fluxboot_table}')
if flux == bpcal:
transfer_fields = [bothpcal]
elif flux == pcal1:
transfer_fields = [bpcal, pcal2]
elif flux == pcal2:
transfer_fields = [bpcal, pcal1]
else:
transfer_fields = [bpcal, bothpcal]
fluxresults = fluxscale(vis=myvis,
caltable=gain_amp_scan_table,
refspwmap=[-1],
transfer=transfer_fields,
fluxtable=fluxboot_table,
reference=flux,
fitorder=1,
incremental=True)
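# With fitorder=1, fluxscale also returns per-field spectral fit
# coefficients of the form log10(S) = a0 + a1*log10(f/fitRefFreq).
# These are the 'a0'/'a1' (and optional 'a2') values used below to
# rebuild the model with setjy (fluxdensity=10**a0, spix=[a1, ...]
# at reffreq=fitRefFreq).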
fluxdict_numpyfile = "{0}.fluxresults.npy".format(myvis)
if os.path.exists(fluxdict_numpyfile):
os.remove(fluxdict_numpyfile)
np.save(fluxdict_numpyfile,
fluxresults, allow_pickle=True)
# Import functions for summarizing/parsing the fluxscale output.
from utils.fluxscale_fit_plots import fluxscale_to_tables, plot_flux_fits
# Make and save the table results:
flux_data_table, flux_fit_table = fluxscale_to_tables(fluxresults)
flux_data_table_name = "{0}.fluxscale_data.csv".format(myvis)
if os.path.exists(flux_data_table_name):
os.remove(flux_data_table_name)
flux_data_table.write(flux_data_table_name, overwrite=True)
flux_fits_table_name = "{0}.fluxscale_fits.csv".format(myvis)
if os.path.exists(flux_fits_table_name):
os.remove(flux_fits_table_name)
flux_fit_table.write(flux_fits_table_name, overwrite=True)
# Make summary plots:
plot_flux_fits(flux_fit_table, flux_data_table)
# Set the model column based on the fits:
for ii, this_field in enumerate(flux_fit_table['field']):
this_index = [flux_fit_table['a1'][ii]]
if 'a2' in flux_fit_table.colnames:
this_index.append(flux_fit_table['a2'][ii])
# NOTE: only sets the Stokes I for now.
setjy(vis=myvis, field=this_field,
spw='',
fluxdensity=[10**float(flux_fit_table['a0'][ii]), 0, 0, 0],
spix=this_index,
reffreq=f"{float(flux_fit_table['fitRefFreq'][ii])/1.e9}GHz",
scalebychan=True,
standard='manual',
usescratch=False)
# Now rederive the amp gains, using the model column set by setjy above.
gain_amp_scan_final_table = '{0}.amp_final.gcal'.format(myvis)
if os.path.exists(gain_amp_scan_final_table):
os.system(f'rm -rf {gain_amp_scan_final_table}')
gaincal(vis=myvis,caltable=gain_amp_scan_final_table,
field=calfields,spw=calchans,refant=refant,
combine='',
spwmap=[[],[0,0,0,0,0,0,6,6,6,6,6,6], []],
calmode='a', solint='300s', gaintype='G',
minsnr=2.0, minblperant=3,
solnorm=False,
gaintable=[bandpass_table,
gain_phase_int_table] + priorcals)
plotms(vis=gain_amp_scan_final_table,xaxis='time',
yaxis='amp',
coloraxis='spw',iteraxis='antenna',ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_amp_scan_final_table}.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
plotms(vis=gain_amp_scan_final_table,
xaxis='freq', yaxis='amp',
coloraxis='spw',iteraxis='antenna', ydatacolumn='data',
xconnector='line', timeconnector=True,
plotfile=f"{caltab_plot_path}/{gain_amp_scan_final_table}_freq.png",
gridrows=3, gridcols=3,
yselfscale=True, showgui=False, overwrite=True, dpi=400)
####################################################################
# Applycal
####################################################################
flagmanager(vis=myvis,mode='save',versionname='beforeapplycal')
## BP cal apply:
# With perSB mapping for fluxscaling
# This WORKS and should be consistent with how MIR applies
# flux calibration
applycal(vis=myvis,field=bpcal,
spw=spwrange,
gaintable=[bandpass_table,
gain_phase_int_table,
gain_amp_scan_final_table] + priorcals,
interp=['linear,linearflag',
'linear,linear',
'nearest,linear'],
spwmap=[[], [0,0,0,0,0,0,6,6,6,6,6,6], []],
gainfield=[bpcal, bpcal, bpcal],
flagbackup=False,
calwt=False)
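# (Each interp entry pairs with the corresponding gaintable entry and
# reads '<time-interp>,<freq-interp>'; e.g. 'linear,linearflag' is
# linear in time and linear in frequency with flag propagation.)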
for pcal in bothpcal.split(","):
applycal(vis=myvis,field=pcal,
spw=spwrange,
gaintable=[bandpass_table,
gain_phase_int_table,
gain_amp_scan_final_table] + priorcals,
interp=['linear,linearflag',
'linearPD,linear',
'nearest,linear'],
spwmap=[[], [0,0,0,0,0,0,6,6,6,6,6,6], []],
gainfield=[bpcal, pcal, pcal],
flagbackup=False, calwt=False)
# If the flux calibrator is also the bandpass or a phase calibrator,
# it has already been calibrated above and does not need a reapply.
if flux != bpcal and flux not in bothpcal:
# Flux calibration:
applycal(vis=myvis,field=flux,
spw=spwrange,
gaintable=[bandpass_table,
gain_phase_int_table,
gain_amp_scan_final_table] + priorcals,
interp=['nearest','nearestPD','nearest'],
spwmap=[[], [0,0,0,0,0,0,6,6,6,6,6,6], []],
gainfield=[bpcal, flux, flux],
flagbackup=False, calwt=False)
# Science calibration:
applycal(vis=myvis,field=science_fields,
spw=spwrange,
gaintable=[bandpass_table,
gain_phase_scan_table,
gain_amp_scan_final_table] + priorcals,
interp=['linear,linearflag',
'linearPD,linear',
'linear,linear'],
spwmap=[[], [0,0,0,0,0,0,6,6,6,6,6,6], []],
gainfield=[bpcal, bothpcal, bothpcal],
flagbackup=False, calwt=False,
applymode='calflagstrict')
flagmanager(vis=myvis,mode='save',versionname='afterapplycal')
# Export the calibrated science targets
target_vis = "{0}.target".format(myvis)
if os.path.exists(target_vis):
os.system("rm -r {}".format(target_vis))
split(vis=myvis, outputvis=target_vis,
field=science_fields, datacolumn='CORRECTED',
keepflags=False)
# import sys
# sys.exit(0)
# Export summary products:
timestring = this_date.strftime("%Y%m%d_%H%M")
products_folder = f"products_{timestring}"
if not os.path.exists(products_folder):
os.mkdir(products_folder)
this_logfile = casalog.logfile()
# Copy the current log file to the products folder:
os.system(f"cp {this_logfile} {products_folder}/casa_reduction.log")
# Copy the config file used for the pipeline run to the products folder:
os.system(f"cp {config_filename} {products_folder}/")
# If given, copy the flags file used to the products folder:
if manual_flag_file is not None:
os.system(f"cp {manual_flag_file} {products_folder}/manual_flags.txt")
# Copy the fluxscale vals, fits and plots:
os.system(f"cp {flux_data_table_name} {products_folder}/")
os.system(f"cp {flux_fits_table_name} {products_folder}/")
os.system(f"cp -r fluxfit_plots {products_folder}/")
# Copy THIS SCRIPT in to the products so it's clear what was run
# for the reduction
this_scriptname = sys.argv[-2]
os.system(f"cp {this_scriptname} {products_folder}/casa_reduction_script.py")
# Gather up calibration tables and final flag version to enable restoration:
# TODO: add this step here!
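# A minimal sketch of that step (an assumption, not the original
# implementation; the table names are those defined above):
for this_caltable in [bandpass_table, gain_phase_int_table,
                      gain_phase_scan_table, gain_amp_scan_final_table,
                      fluxboot_table]:
    os.system(f"cp -r {this_caltable} {products_folder}/")
# CASA keeps flag versions alongside the MS in <vis>.flagversions:
os.system(f"cp -r {myvis}.flagversions {products_folder}/")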
# Command line inputs.
sma_config = read_config(config_filename)
casalog.post(f"Making quicklook products for: {sma_config['myvis']}")
# --------------------------------
# Make quicklook images of targets
# --------------------------------
run_quicklook = True
# Run dirty imaging only for a quicklook
if run_quicklook:
# Dirty images per sideband per target.
quicklook_continuum_imaging(config_filename,
image_type='target',
niter=0, nsigma=5.,
output_folder="quicklook_imaging")
# Gain and bandpass cals. No imaging of the flux cal by default.
# It's helpful to clean for a few iterations on point source
# calibrators.
quicklook_continuum_imaging(config_filename,
image_type='calibrator',
niter=0, nsigma=5.,
output_folder="quicklook_calibrator_imaging")
os.system("mv {0} {1}".format('quicklook_imaging', products_folder))
os.system("mv {0} {1}".format('quicklook_calibrator_imaging', products_folder))
# ----------------------------
# Now make additional QA plots:
# -----------------------------
# Calibration table:
make_all_caltable_txt(config_filename)
# chans_to_show : int
# Number of channels to keep for visualizing in plots. Default is to average down
# to 128 per chunk/SPW. CHOOSING LARGER VALUES WILL RESULT IN LARGE DATA FILES!
chans_to_show = 128
this_config = read_config(config_filename)
# Calculate the number of channels from the given rechunk factor
chans_in_ms = 16384 / int(this_config['rechunk'])
chans_to_avg = chans_in_ms / chans_to_show
print(f"Averaging channels by {chans_to_avg} from {chans_in_ms} to {chans_to_show}")
casalog.post(f"Averaging channels by {chans_to_avg} from {chans_in_ms} to {chans_to_show}")
chans_to_avg = int(chans_to_avg)
# Per field outputs:
# - vs time: all channels averaged together (chanavg_vs_time=16384)
# - vs channel: averaged down by chans_to_avg to ~chans_to_show channels
make_qa_tables(config_filename,
output_folder='scan_plots_txt',
outtype='txt',
overwrite=False,
chanavg_vs_time=16384,
chanavg_vs_chan=chans_to_avg)
# make_all_flagsummary_data(myvis, output_folder='perfield_flagfraction_txt')
# Move these folders to the products folder.
os.system("mv {0} {1}".format('final_caltable_txt', products_folder))
os.system("mv {0} {1}".format('scan_plots_txt', products_folder))
# os.system("cp -r {0} {1}".format('perfield_flagfraction_txt', products_folder))
casalog.post("Finished! To create interactive figures, run QAPlotter in the products"
" directory.")
|
[
"[email protected]"
] | |
57a213ea3ecfa383451d374b3a1edfcc716243f0
|
98590747113ca3022c67c8bc6332b2bf48d7073e
|
/remove_element.py
|
3cf2b6fba59b3b62c77e92e65fe5b24b3c65aee4
|
[] |
no_license
|
buxizhizhoum/leetcode
|
a54291519a23fe82e9f9620e5a2266833696f005
|
cf4235170db3629b65790fd0855a8a72ac5886f7
|
refs/heads/master
| 2022-06-04T02:54:26.381077 | 2022-04-01T06:58:19 | 2022-04-01T06:58:19 | 116,791,542 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 995 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Given an array and a value, remove all instances of that value in-place and return the new length.
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
The order of elements can be changed. It doesn't matter what you leave beyond the new length.
Example:
Given nums = [3,2,2,3], val = 3,
Your function should return length = 2, with the first two elements of nums being 2.
"""
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
# delete element from the end of the list
for index in range((len(nums) - 1), -1, -1):
if nums[index] == val:
del nums[index]
return len(nums)
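
# An alternative O(n) approach (a sketch, not part of the original
# solution): copy kept elements forward with a write pointer instead of
# deleting in place, which avoids an O(n) shift for every deletion.
def remove_element_two_pointer(nums, val):
    write = 0
    for x in nums:
        if x != val:
            nums[write] = x
            write += 1
    return write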
if __name__ == "__main__":
# test_list = range(10)
test_list = [3, 2, 2, 3]
    print(Solution().removeElement(test_list, 3))
|
[
"[email protected]"
] | |
94cc1dad31232e4c2af4388ced4955c649c7b260
|
1287ad54942fd2020a217ab12004a541abb62558
|
/pythonexercicios/Ex108/moeda.py
|
0a519a376cc21a1e962759b43bc9ca7004e980db
|
[] |
no_license
|
LuPessoa/exerciciospy-
|
637f24581722e547a62380973ca645b55ff65d90
|
b5faad818f978bb13a65922edceb17888b73a407
|
refs/heads/master
| 2023-05-12T04:16:39.847184 | 2021-06-04T03:02:24 | 2021-06-04T03:02:24 | 374,410,212 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
def aumentar(preço=0, taxa=0):
res = preço + (preço * taxa/100)
return res
def diminuir(preço=0, taxa=0):
res = preço - (preço * taxa/100)
return res
def dobro(preço=0):
res = preço * 2
return res
def metade(preço=0):
res = preço / 2
return res
def moeda(preço=0, moeda='R$'):
return f'{moeda}{preço:>.2f}'.replace('.',',')
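
# A short usage sketch (assumes only the functions above):
if __name__ == '__main__':
    preço = 100.0
    print(moeda(aumentar(preço, 10)))  # R$110,00
    print(moeda(diminuir(preço, 10)))  # R$90,00
    print(moeda(dobro(preço)))         # R$200,00
    print(moeda(metade(preço)))        # R$50,00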
|
[
"[email protected]"
] | |
de1e1e5ebf9637f899dd9c345654941f004e9640
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/1/chj.py
|
1307c45a41d4d9cbfc10b6b63594a73019e25098
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys

def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()

def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'cHJ':
                printFunction(data[1:])
            else:
                print('ERROR')
                return

if __name__ == '__main__':
    main(sys.argv[1])
|
[
"[email protected]"
] | |
000e48d54904ebe297662105a424f17f9f536e69
|
b5bb27d90690582783426baaae5bcb4735eaa8bf
|
/tests/rule_engine/normalisation_rule.py
|
18ed5525b29123c7fca2111848598df8fe9eccf8
|
[] |
no_license
|
leondz/ternip
|
ef94bfbdc1824a3cfabc3e800f8de55bc5ec6140
|
6a24c6419a29352dff1dd1b2ede2105dec3274d2
|
refs/heads/master
| 2021-01-17T05:04:40.382565 | 2011-06-09T22:04:12 | 2011-06-09T22:04:12 | 1,875,699 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,539 |
py
|
#!/usr/bin/env python
import unittest
from ternip import timex
from ternip.rule_engine import normalisation_rule
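# Rule bodies use one <token~POS> element per (token, POS) pair; the
# numbered groups captured by the regex feed the {#n} substitutions in
# the normalisation expressions. (Summary inferred from the tests below.)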
class normalisation_rule_Test(unittest.TestCase):
def testApplyValue(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testApplyValue', r'{#2} + "01" + {#1}')
t = timex(type='date')
self.assertTrue(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.value, '19960106')
def testApplyChangeType(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testApplyChangeType', change_type=r'"non-date"')
t = timex(type='date')
self.assertTrue(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.type, 'non-date')
def testApplyFreq(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testApplyFreq', freq=r'"1D"')
t = timex(type='date')
self.assertTrue(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.freq, '1D')
def testApplyQuant(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testApplyQuant', quant=r'"EVERY"')
t = timex(type='date')
self.assertTrue(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.quant, 'EVERY')
def testApplyInsensitive(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><january~.+><(\d{4})~.+>', 'date', 'testApplyInsensitive', r'{#2} + "01" + {#1}')
t = timex(type='date')
self.assertTrue(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.value, '19960106')
def testNoApply(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><February~.+><(\d{4})~.+>', 'date', 'testNoApply', r'{#2} + "01" + {#1}')
t = timex(type='date')
self.assertFalse(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
self.assertEquals(t.value, None)
def testApplyCorrectType(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testApplyCorrectType', r'{#2} + "01" + {#1}')
t = timex(type='time')
self.assertFalse(rule.apply(t, '', '', [('06', 'POS', set([t])), ('th', 'POS', set([t])), ('January', 'POS', set([t])), ('1996', 'POS', set([t]))], [], [])[0])
def testPosGuardAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosGuardAllows', r'{#2} + "01" + {#1}',
guards = [r'<th~.+><January~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testPosGuardBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosGuardBlocks', r'{#2} + "01" + {#1}',
guards = [r'<th~.+><February~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
def testNegGuardAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegGuardAllows', r'{#2} + "01" + {#1}',
guards = [r'!<th~.+><February~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testNegGuardBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegGuardBlocks', r'{#2} + "01" + {#1}',
guards = [r'!<th~.+><January~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
def testPosBeforeAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosBeforeAllows', r'{#2} + "01" + {#1}',
before_guards = [r'<on~.+><the~.+>$'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testPosBeforeBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosBeforeBlocks', r'{#2} + "01" + {#1}',
before_guards = [r'<to~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
def testNegBeforeAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegBeforeAllows', r'{#2} + "01" + {#1}',
before_guards = [r'!<to~.+><Atlanta~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testNegBeforeBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegBeforeBlocks', r'{#2} + "01" + {#1}',
before_guards = [r'!<a~.+><plane~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
def testPosAfterAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosAfterAllows', r'{#2} + "01" + {#1}',
after_guards = [r'<to~.+><Atlanta~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testPosAfterBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testPosAfterBlocks', r'{#2} + "01" + {#1}',
after_guards = [r'<a~.+><plane~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
def testNegAfterAllows(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegAfterAllows', r'{#2} + "01" + {#1}',
after_guards = [r'!<a~.+><plane~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertTrue(rule.apply(t, '', '', body, before, after)[0])
self.assertEquals(t.value, '19960106')
def testNegAfterBlocks(self):
rule = normalisation_rule(r'<(\d+)~.+><th~.+><January~.+><(\d{4})~.+>', 'date', 'testNegAfterBlocks', r'{#2} + "01" + {#1}',
after_guards = [r'!<to~.+><Atlanta~.+>'])
t = timex(type='date')
(before, body, after) = (
[('We', 'POS', set()),
('took', 'POS', set()),
('a', 'POS', set()),
('plane', 'POS', set()),
('on', 'POS', set()),
('the', 'POS', set())],
[('06', 'POS', set()),
('th', 'POS', set()),
('January', 'POS', set()),
('1996', 'POS', set())],
[('to', 'POS', set()),
('Atlanta', 'POS', set())]
)
self.assertFalse(rule.apply(t, '', '', body, before, after)[0])
|
[
"[email protected]"
] | |
d677171f409b4bf6deeacc7549f13d7a184b153e
|
9c462effa9f3fa5b5680b1a969198dfea8bb6702
|
/code_py/waveLambda/kinwave.py
|
7eb85d26422fc92a119a4ebeeb7d28799821c08e
|
[] |
no_license
|
nvladimi/nliwaves
|
51035e56f09c78bd61b9c356f6ba6875ce03e55c
|
81969b96a60d9a5f160af489172547ceff2578cd
|
refs/heads/master
| 2023-05-02T07:39:42.470850 | 2023-04-22T04:07:10 | 2023-04-22T04:07:10 | 219,194,156 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,049 |
py
|
# Interaction coefficients and resonance integrals for the wave
# kinetic equation (deep-water gravity waves, g = 1).
import numpy as np
eps=1e-12
#-- define functions --
def kfun(p, q, r):
k = p + q - r
return(k)
def Qfun(k,p,q):
#print("Qfun arguments:", k, p, q)
pp = sum(p*p)
qq = sum(q*q)
kk = sum(k*k)
kp = sum(p*k)
ss = kk * pp
if ss < eps:
return(0)
Q = (qq/ss)**(1/8) * (np.sqrt(ss) + kp)
return(Q)
def Ufun(k,p,q):
U = - Qfun(-k,p,q) - Qfun(-k,q,p) + Qfun(p,q,-k)
return(U)
def U1fun(k,p,q):
U1 = Qfun(k,p,q) + Qfun(k,q,p) + Qfun(p,q,k)
return(U1)
def Afun(k,p,q):
U = Ufun(k,p,q)
#print("Afun arguments, U:", k, p, q, U)
if np.abs(U) < eps:
return(0)
denom = (sum(k*k))**(1/4) - (sum(p*p))**(1/4) - (sum(q*q))**(1/4)
A = - U / denom
return(A)
def A1fun(k,p,q):
U1 = Ufun(k,p,q)
#print("A1fun arguments, U1:", k, p, q, U1)
if np.abs(U1) < eps:
return(0)
denom = (sum(k*k))**(1/4) + (sum(p*p))**(1/4) + (sum(q*q))**(1/4)
A1 = - U1 / denom
return(A1)
def Zfun(k0,k1,k2,k3):
Z = U1fun( k0, k1, -k0-k1 ) * A1fun( k2, k3, -k2-k3 ) \
+ Ufun( k0+k1, k0, k1 ) * Afun( k2+k3, k2, k3 ) \
- Ufun( k0, k2, k0-k2 ) * Afun( k3, k1, k3-k1 ) \
- Ufun( k2, k0, k2-k0 ) * Afun( k1, k3, k1-k3 ) \
- Ufun( k0, k3, k0-k3 ) * Afun( k2, k1, k2-k1 ) \
- Ufun( k3, k0, k3-k0 ) * Afun( k1, k2, k1-k2 )
return(Z)
def Vfun(k0,k1,k2,k3):
kk0 = sum(k0**2)
kk1 = sum(k1**2)
if kk0<eps:
return(0)
if kk1<eps:
return(0)
kk2 = sum(k2**2)
kk3 = sum(k3**2)
k02 = np.sqrt( sum((k0+k2)**2) )
k12 = np.sqrt( sum((k1+k2)**2) )
k03 = np.sqrt( sum((k0+k3)**2) )
k13 = np.sqrt( sum((k1+k3)**2) )
v = 2 * kk0 * np.sqrt(kk1) + 2 * kk1 * np.sqrt(kk0) \
- np.sqrt(kk0*kk1) * (k02 + k12 + k03 +k13)
V = (kk2 * kk3 / kk0 / kk1)**(1/8) * v
return(V)
def Wfun(p,q,k,r):
W = Vfun(-p,-q,k,r) + Vfun(k,r,-p,-q) - Vfun(-p,k,-q,r) \
- Vfun(-q,k,-p,r) - Vfun(-p,r,-q,k) - Vfun(-q,r,-p,k)
return(W)
def Lfun(p,q,k,r):
#print("Lfun arguments:", p, q, k, r)
L = Wfun(p,q,k,r) + \
0.25*( Zfun(p,q,k,r) + Zfun(q,p,k,r) + Zfun(k,r,p,q) + Zfun(r,k,p,q) )
return(L)
#------------------------------------------------------------------------------
def Nfun(r,q,p,p1):
#print(r, q, p, p1)
k = p + q - r
q1 = p + q - p1
pp = sum(p*p)
qq = sum(q*q)
rr = sum(r*r)
kk = sum(k*k)
denom = pp**(0.25) + qq**(0.25) - rr**(0.25) - kk**(0.25)
if np.abs(denom) < eps:
return(0)
L1 = Lfun(p,q,k,r)
if np.abs(L1) < eps:
return(0)
L2 = Lfun(k,r,p1,q1)
if np.abs(L2) < eps:
return(0)
L3 = Lfun(p,q,p1,q1)
if np.abs(L3) < eps:
return(0)
#print("L1, L2, L3 = ", L1, L2, L3)
c = ( rr**(-0.125) + kk**(-0.125) ) / denom
#print("rr, kk, denom, c = ", rr, kk, denom, c)
N = c * L1 * L2 / L3
#print("N = ", N)
return(N)
#------------------------------------------------------------------------------
def NLfun(r,q,p,p1):
#print(r, q, p, p1)
k = p + q - r
q1 = p + q - p1
L1 = Lfun(p,q,k,r)
if np.abs(L1) < eps:
return(0)
L2 = Lfun(k,r,p1,q1)
if np.abs(L2) < eps:
return(0)
L3 = Lfun(p,q,p1,q1)
if np.abs(L3) < eps:
return(0)
NL = L1 * L2 / L3
return(NL)
#------------------------------------------------------------------------------
def NFfun(r,q,p,p1):
#print(r, q, p, p1)
k = p + q - r
pp = sum(p*p)
qq = sum(q*q)
rr = sum(r*r)
kk = sum(k*k)
if np.abs(rr) < eps:
return(0)
if np.abs(kk) < eps:
return(0)
denom = pp**(0.25) + qq**(0.25) - rr**(0.25) - kk**(0.25)
if np.abs(denom) < eps:
return(0)
NF = ( rr**(-0.125) + kk**(-0.125) ) / denom
return(NF)
#------------------------------------------------------------------------------
def denomFfun(r,q,p,p1):
#print(r, q, p, p1)
k = p + q - r
pp = sum(p*p)
qq = sum(q*q)
rr = sum(r*r)
kk = sum(k*k)
denom = pp**(0.25) + qq**(0.25) - rr**(0.25) - kk**(0.25)
return(denom)
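# Note: pp = |p|^2, so pp**0.25 = |p|**(1/2) = omega(p) for deep-water
# gravity waves with g = 1; denom is therefore the four-wave frequency
# mismatch omega_p + omega_q - omega_r - omega_k, which vanishes on
# resonance (hence the principal-value handling below).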
#------------------------------------------------------------------------------
def L4Dfun(m, mr, fname):
L4D = np.zeros((2*m+1,2*m+1,2*m+1,2*m+1))
r = np.array((mr, 0))
for q0 in np.arange(-m,m+1):
for q1 in np.arange(-m,m+1):
q = np.array((q0, q1))
for p0 in np.arange(-m,m+1):
for p1 in np.arange(-m,m+1):
p = np.array((p0, p1))
k = p + q - r
L4D[q0+m,q1+m,p0+m,p1+m] = Lfun(p,q,k,r)
L4D = L4D/mr**3
with open(fname, 'wb') as f:
np.save(f, np.array([m, mr]))
np.save(f, L4D)
#------------------------------------------------------------------------------
def L2Dfun(m, dq, pp, fname):
r = np.array((1, 0))
n = len(pp)
L2D = np.zeros((n,2*m+1,2*m+1))
for i in np.arange(n):
p = pp[i]
for q0 in np.arange(-m,m+1):
for q1 in np.arange(-m,m+1):
q = np.array((q0, q1))*dq
k = p + q - r
L2D[i,q0+m,q1+m] = Lfun(p,q,k,r)
with open(fname, 'wb') as f:
np.save(f, np.array([m, dq]))
np.save(f, pp)
np.save(f, L2D)
#------------------------------------------------------------------------------
def Nsum(p,p1, q, mr, dr, arrout = False):
NF = np.zeros((2*mr+1, 2*mr+1))
NL = np.zeros((2*mr+1, 2*mr+1))
Nr = np.zeros((2*mr+1, 2*mr+1))
for i0 in np.arange(-mr,mr+1):
for i1 in np.arange(-mr,mr+1):
r = np.array((i0,i1))*dr
#Nr[mr+i0,mr+i1] = Nfun(r,q,p,p1)
NL[mr+i0,mr+i1] = NLfun(r,q,p,p1)
NF[mr+i0,mr+i1] = NFfun(r,q,p,p1)
S = np.sum(NL*NF)*dr*dr
if arrout:
return(S, NF, NL)
else:
return(S)
#------------------------------------------------------------------------------
def N1sum(p,p1, q, mr, dr, arrout = False):
NF = np.zeros((2*mr+1, 2*mr+1))
for i0 in np.arange(-mr,mr+1):
for i1 in np.arange(-mr,mr+1):
r = np.array((i0,i1))*dr
NF[mr+i0,mr+i1] = NFfun(r,q,p,p1)
S = np.sum(NF)*dr*dr
if arrout:
return(S, NF)
else:
return(S)
#------------------------------------------------------------------------------
def integratePV(p, p1, q, mr, dr, arrout):
debug = True
drmin = dr/32
#-- compute denominator field --
D = np.zeros((2*mr+1, 2*mr+1))
for i0 in np.arange(-mr,mr+1):
for i1 in np.arange(-mr,mr+1):
r = np.array((i0,i1))*dr
D[mr+i0,mr+i1] = denomFfun(r,q,p,p1)
#-- find and classify cells where denominator changes sign --
type1 = set({})
type2 = set({})
type3 = set({})
type4 = set({})
type5 = set({})
type6 = set({})
signD = np.sign(D)
change0 = signD[:-1, :] * signD[1:, :]
change1 = signD[:, :-1] * signD[:, 1:]
ind0 = np.argwhere(change0 < 0)
ind1 = np.argwhere(change1 < 0)
for i in ind0:
for j in ind1:
if (i == j).all():
type6.add(tuple(i))
if (i == j-[1,0]).all():
type5.add(tuple(i))
if (i-[0,1] == j).all():
type3.add(tuple(j))
if (i+[1,-1] == j).all():
type4.add(tuple(i-[0,1]))
for j in ind0:
if (i+[0,1] == j).all():
type1.add(tuple(i))
for i in ind1:
for j in ind1:
if (i+[1,0] == j).all():
type2.add(tuple(i))
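    # Each grid cell is classified by which pair of edges the zero
    # contour of the denominator crosses; the six sets correspond to
    # the six possible edge-pair configurations of a single crossing.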
if 1==2:
#for i in ind0:
# print(i, D[i[0],i[1]], D[i[0]+1, i[1] ])
#print(type(ind1))
#for i in ind1:
# print(i, D[i[0],i[1]], D[i[0], i[1]+1 ])
#
#print("number of crossings = ", len(ind0) + len(ind1))
def print_corners(i):
print("[{:3.0f}, {:3.0f}] {:3.0f} {:3.0f} {:3.0f} {:3.0f}".format(i[0], i[1],
signD[i]*signD[i], signD[i[0],i[1]+1]*signD[i],
signD[i[0]+1, i[1]+1]*signD[i], signD[i[0]+1,i[1]]*signD[i] ) )
print("number type1 cells = ", len(type1) )
for i in type1:
print_corners(i)
print("number type2 cells = ", len(type2) )
for i in type2:
print_corners(i)
print("number type3 cells = ", len(type3) )
for i in type3:
print_corners(i)
print("number type4 cells = ", len(type4) )
for i in type4:
print_corners(i)
print("number type5 cells = ", len(type5) )
for i in type5:
print_corners(i)
print("number type6 cells = ", len(type6) )
for i in type6:
print_corners(i)
print("number of crossings = ", len(ind0) + len(ind1))
print("number of cells = ",
len(type1) + len(type2) + len(type3) +len(type4) + len(type5) + len(type6) )
#-- find adjustments to sum in each cells with interface --
def evalFun(x,y):
r = np.array((x,y))
# v = NFfun(r,q,p,p1)
v = 1/denomFfun(r,q,p,p1)
# v = 5*x - 4*y + 3
# v=1
#v = evalDenom(x,y)
return(v)
def evalDenom(x,y):
r = np.array((x,y))
v = denomFfun(r,q,p,p1)
return(v)
def findrootX(x0,x1,y):
dx = x1 - x0
f0 = evalDenom(x0,y)
f1 = evalDenom(x1,y)
#print("findrootX: ", f0,f1)
while dx > drmin:
xm = (x0+x1)/2
fm = evalDenom(xm,y)
if (f0*fm) < 0:
x1 = xm
f1 = fm
else:
x0 = xm
f0 = fm
dx = dx/2
#print("findrootX: ", fm)
return(xm)
def findrootY(y0,y1,x):
dy = y1 - y0
f0 = evalDenom(x,y0)
f1 = evalDenom(x,y1)
#print("findrootY: ", f0,f1)
while dy > drmin:
ym = (y0+y1)/2
fm = evalDenom(x,ym)
if (f0*fm) < 0:
y1 = ym
f1 = fm
else:
y0 = ym
f0 = fm
dy = dy/2
#print("findrootY: ", fm)
return(ym)
def cellcoords(cell,dr):
return((cell[0]-mr)*dr, (cell[0]-mr+1)*dr, (cell[1]-mr)*dr, (cell[1]-mr+1)*dr)
def celleval(x0,x1,y0,y1):
a = evalFun(x0,y0)
b = evalFun(x0,y1)
c = evalFun(x1,y1)
d = evalFun(x1,y0)
return(a,b,c,d)
    def offcut(x0,y0,x1,y1):
        # offsets along the unit normal to the segment, scaled by 2*drmin
        ddx = np.abs(x0-x1)
        ddy = np.abs(y0-y1)
        l = np.sqrt(ddx*ddx + ddy*ddy)
        dx = ddy/l * drmin*2
        dy = ddx/l * drmin*2
        return(dx, dy)
def volume12():
v1 = a * dr * (A + F1 + D)
v2 = d * dr * (D + F1 + G1)
v3 = b * dr * (G2 + F2 + B)
v4 = c * dr * (B + C + G2)
dV = (v1 + v2 + v3 + v4)/6 - dr*dr*(A+B+C+D)/4
return(dV)
def volume36():
v1 = a * c * (A + F1 + G1)
v2 = b * dr * (B + C + F2)
v3 = d * dr * (D + C + G2)
v4 = (a*dr + c*dr - a*c) * (C + F2 + G2)
dV = (v1 + v2 + v3 + v4)/6 - dr*dr*(A+B+C+D)/4
return(dV)
for cell in type1:
x0, x1, y0, y1 = cellcoords(cell,dr)
X0 = findrootX(x0, x1, y0)
X1 = findrootX(x0, x1, y1)
dx, dy = offcut(X0,y0, X1,y1)
if X0<X1:
b = x1 - X0
a = X0 - x0
c = x1 - X1
d = X1 - x0
A, D, C, B = celleval(x0,x1,y0,y1)
G1 = evalFun(X1-dx, y1+dy)
G2 = evalFun(X1+dx, y1-dy)
F1 = evalFun(X0-dx, y0+dy)
F2 = evalFun(X0+dx, y0-dy)
dV = volume12()
#print( np.sign(np.array((A, D, F1, G1, C, B, F2, G2))*np.sign(A)).astype("int") )
#print(D, G1, G2, C)
else:
a = x1 - X0
b = X0 - x0
d = x1 - X1
c = X1 - x0
B, C, D, A = celleval(x0,x1,y0,y1)
G1 = evalFun(X1+dx, y1+dy)
G2 = evalFun(X1-dx, y1-dy)
F1 = evalFun(X0+dx, y0+dy)
F2 = evalFun(X0-dx, y0-dy)
dV = volume12()
#print( np.sign(np.array((A, D, F1, G1, C, B, F2, G2))*np.sign(A)).astype("int") )
#print(dV, dr*dr*(A+B+C+D)/4 )
for cell in type2:
x0, x1, y0, y1 = cellcoords(cell,dr)
Y0 = findrootY(y0, y1, x0)
Y1 = findrootY(y0, y1, x1)
dx, dy = offcut(x0,Y0, x1,Y1)
if Y0<Y1:
b = y1 - Y0
a = Y0 - y0
c = y1 - Y1
d = Y1 - y0
A, B, C, D = celleval(x0,x1,y0,y1)
F1 = evalFun(x0+dx, Y0-dy)
F2 = evalFun(x0-dx, Y0+dy)
G1 = evalFun(x1+dx, Y1-dy)
G2 = evalFun(x1-dx, Y1+dy)
dV = volume12()
print( np.sign(np.array((A, D, F1, G1, C, B, F2, G2))*np.sign(A)).astype("int") )
else:
c = y1 - Y0
d = Y0 - y0
b = y1 - Y1
a = Y1 - y0
D, C, B, A = celleval(x0,x1,y0,y1)
G1 = evalFun(x0-dx, Y0-dy)
G2 = evalFun(x0+dx, Y0+dy)
F1 = evalFun(x1-dx, Y1-dy)
F2 = evalFun(x1+dx, Y1+dy)
dV = volume12()
#print( np.sign(np.array((A, D, F1, G1, C, B, F2, G2))*np.sign(A)).astype("int") )
#print(dV, dr*dr*(A+B+C+D)/4 )
for cell in type3:
x0, x1, y0, y1 = cellcoords(cell,dr)
Y0 = findrootY(y0, y1, x0)
X1 = findrootX(x0, x1, y1)
dx, dy = offcut(x0,Y0, X1,y1)
b = x1 - X1
a = X1 - x0
c = y1 - Y0
d = Y0 - y0
D, A, B, C = celleval(x0,x1,y0,y1)
F1 = evalFun(X1-dx, y1+dy)
F2 = evalFun(X1+dx, y1-dy)
G1 = evalFun(x0-dx, Y0+dy)
G2 = evalFun(x0+dx, Y0-dy)
dV = volume36()
#print(dV, dr*dr*(A+B+C+D)/4 )
for cell in type4:
x0, x1, y0, y1 = cellcoords(cell,dr)
X1 = findrootX(x0, x1, y1)
Y1 = findrootY(y0, y1, x1)
dx, dy = offcut(X1,y1, x1,Y1)
c = x1 - X1
d = X1 - x0
a = y1 - Y1
b = Y1 - y0
C, D, A, B = celleval(x0,x1,y0,y1)
F1 = evalFun(x1+dx, Y1+dy)
F2 = evalFun(x1-dx, Y1-dy)
G1 = evalFun(X1+dx, y1+dy)
G2 = evalFun(X1-dx, y1-dy)
dV = volume36()
#print(dV, dr*dr*(A+B+C+D)/4 )
for cell in type5:
x0, x1, y0, y1 = cellcoords(cell,dr)
X0 = findrootX(x0, x1, y0)
Y1 = findrootY(y0, y1, x1)
dx, dy = offcut(X0,y0, x1,Y1)
a = x1 - X0
b = X0 - x0
d = y1 - Y1
c = Y1 - y0
B, C, D, A = celleval(x0,x1,y0,y1)
F1 = evalFun(X0+dx, y0-dy)
F2 = evalFun(X0-dx, y0+dy)
G1 = evalFun(x1+dx, Y1-dy)
G2 = evalFun(x1-dx, Y1+dy)
dV = volume36()
#print(dV, dr*dr*(A+B+C+D)/4 )
for cell in type6:
x0, x1, y0, y1 = cellcoords(cell,dr)
X0 = findrootX(x0, x1, y0)
Y0 = findrootY(y0, y1, x0)
dx, dy = offcut(x0,Y0, X0,y0)
d = x1 - X0
c = X0 - x0
b = y1 - Y0
a = Y0 - y0
A, B, C, D = celleval(x0,x1,y0,y1)
F1 = evalFun(x0-dx, Y0-dy)
F2 = evalFun(x0+dx, Y0+dy)
G1 = evalFun(X0-dx, y0-dy)
G2 = evalFun(X0+dx, y0+dy)
dV = volume36()
#print(dV, dr*dr*(A+B+C+D)/4 )
#S = np.sum(NF)*dr*dr
S = 0
return(S, signD)
#------------------------------------------------------------------------------
def N2Darr(p,p1, mq, dq, mr, dr):
Nq = np.zeros((2*mq+1, 2*mq+1))
for i0 in np.arange(-mq,mq+1):
for i1 in np.arange(-mq,mq+1):
q = np.array((i0,i1))*dq
Nq[mq+i0, mq+i1] = Nsum(p,p1,q, mr, dr)
return(Nq)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def write_N2D_pi2(nq, nr):
p = [1, 0]
p1 = [0, 1]
fname = "sumN_pi2_q" + str(nq).zfill(2) + "r" + str(nr).zfill(2)+ ".npy"
dq = 1/nq
dr = 1/nr
mq = nq*nq
mr = nr*nr
Nlambda = N2Darr(np.array(p), np.array(p1), mq, dq, mr, dr)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, (mq, dq, mr, dr))
np.save(f, Nlambda)
def write_N2D_pi4(nq, nr):
p = [1, 0]
p1 = [1, 1]/np.sqrt(2)
fname = "sumN_pi4_q" + str(nq).zfill(2) + "r" + str(nr).zfill(2)+ ".npy"
dq = 1/nq
dr = 1/nr
mq = nq*nq
mr = nr*nr
Nlambda = N2Darr(np.array(p), np.array(p1), mq, dq, mr, dr)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, (mq, dq, mr, dr))
np.save(f, Nlambda)
#------------------------------------------------------------------------------
def N2Dfld_pi2(q, mr, dr, fname):
p = [1, 0]
p1 = [1, 1]
S, NF, NL = Nsum(np.array(p), np.array(p1), np.array(q), mr, dr, arrout=True)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, q)
np.save(f, (mr, dr, S))
np.save(f, NF)
np.save(f, NL)
def N2Dfld_pi4(q, mr, dr, fname):
p = [1, 0]
p1 = [1, 1]/np.sqrt(2)
S, NF, NL = Nsum(np.array(p), np.array(p1), np.array(q), mr, dr, arrout=True)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, q)
np.save(f, (mr, dr, S))
np.save(f, NF)
np.save(f, NL)
def N1fld(q, mr, dr, fname):
p = [1, 0]
p1 = [0, 1]
S, NF = N1sum(np.array(p), np.array(p1), np.array(q), mr, dr, arrout=True)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, q)
np.save(f, (mr, dr, S))
np.save(f, NF)
#------------------------------------------------------------------------------
def testNfld(q, mr, dr, fname):
p = [1, 0]
p1 = [0, 1]
S, NF = integratePV(np.array(p), np.array(p1), np.array(q), mr, dr, arrout=True)
with open(fname, 'wb') as f:
np.save(f, p)
np.save(f, p1)
np.save(f, q)
np.save(f, (mr, dr, S))
np.save(f, NF)
#------------------------------------------------------------------------------
|
[
"[email protected]"
] | |
2372ed755ddb8bc26b62dd243e35889f5f63cb2a
|
2a3743ced45bd79826dcdc55f304da049f627f1b
|
/venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/click/termui.pyi
|
95b685076433b2403becc6ca2d84a6d9f8a366c8
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Dimasik007/Deribit_funding_rate_indicator
|
12cc8cd7c0be564d6e34d9eae91940c62492ae2a
|
3251602ae5249069489834f9afb57b11ff37750e
|
refs/heads/master
| 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 |
MIT
| 2023-05-22T22:29:24 | 2019-07-24T20:32:19 |
Python
|
UTF-8
|
Python
| false | false | 3,356 |
pyi
|
from typing import (
Any,
Callable,
Generator,
Iterable,
IO,
List,
Optional,
Text,
overload,
Tuple,
TypeVar,
)
from click.core import _ConvertibleType
from click._termui_impl import ProgressBar as _ProgressBar
def hidden_prompt_func(prompt: str) -> str:
...
def _build_prompt(
text: str,
suffix: str,
show_default: bool = ...,
default: Optional[str] = ...,
) -> str:
...
def prompt(
text: str,
default: Optional[str] = ...,
hide_input: bool = ...,
confirmation_prompt: bool = ...,
type: Optional[_ConvertibleType] = ...,
value_proc: Optional[Callable[[Optional[str]], Any]] = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
show_choices: bool = ...,
) -> Any:
...
def confirm(
text: str,
default: bool = ...,
abort: bool = ...,
prompt_suffix: str = ...,
show_default: bool = ...,
err: bool = ...,
) -> bool:
...
def get_terminal_size() -> Tuple[int, int]:
...
def echo_via_pager(text: str, color: Optional[bool] = ...) -> None:
...
_T = TypeVar('_T')
@overload
def progressbar(
iterable: Iterable[_T],
length: Optional[int] = ...,
label: Optional[str] = ...,
show_eta: bool = ...,
show_percent: Optional[bool] = ...,
show_pos: bool = ...,
item_show_func: Optional[Callable[[_T], str]] = ...,
fill_char: str = ...,
empty_char: str = ...,
bar_template: str = ...,
info_sep: str = ...,
width: int = ...,
file: Optional[IO] = ...,
color: Optional[bool] = ...,
) -> _ProgressBar[_T]:
...
@overload
def progressbar(
iterable: None = ...,
length: Optional[int] = ...,
label: Optional[str] = ...,
show_eta: bool = ...,
show_percent: Optional[bool] = ...,
show_pos: bool = ...,
item_show_func: Optional[Callable[[_T], str]] = ...,
fill_char: str = ...,
empty_char: str = ...,
bar_template: str = ...,
info_sep: str = ...,
width: int = ...,
file: Optional[IO] = ...,
color: Optional[bool] = ...,
) -> _ProgressBar[int]:
...
def clear() -> None:
...
def style(
text: str,
fg: Optional[str] = ...,
bg: Optional[str] = ...,
bold: Optional[bool] = ...,
dim: Optional[bool] = ...,
underline: Optional[bool] = ...,
blink: Optional[bool] = ...,
reverse: Optional[bool] = ...,
reset: bool = ...,
) -> str:
...
def unstyle(text: str) -> str:
...
# Styling options copied from style() for nicer type checking.
def secho(
text: str,
file: Optional[IO] = ...,
nl: bool = ...,
err: bool = ...,
color: Optional[bool] = ...,
fg: Optional[str] = ...,
bg: Optional[str] = ...,
bold: Optional[bool] = ...,
dim: Optional[bool] = ...,
underline: Optional[bool] = ...,
blink: Optional[bool] = ...,
reverse: Optional[bool] = ...,
reset: bool = ...,
):
...
def edit(
text: Optional[str] = ...,
editor: Optional[str] = ...,
env: Optional[str] = ...,
require_save: bool = ...,
extension: str = ...,
filename: Optional[str] = ...,
) -> str:
...
def launch(url: str, wait: bool = ..., locate: bool = ...) -> int:
...
def getchar(echo: bool = ...) -> Text:
...
def pause(
info: str = ..., err: bool = ...
) -> None:
...
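# Usage sketch (illustrative only; the stub above just declares types):
#     import click
#     with click.progressbar(range(100), label="Working") as bar:
#         for _ in bar:
#             pass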
|
[
"[email protected]"
] | |
d7f559bd279f22f7496147ac75813dd0b5c527ec
|
21590487701d2dcbe1a1c1dd81c6e983f7523cb6
|
/opentelemetry-proto/src/opentelemetry/proto/common/v1/common_pb2.pyi
|
304feec5abb7924061a837d4a64891a288db52b4
|
[
"Apache-2.0"
] |
permissive
|
open-telemetry/opentelemetry-python
|
837199e541c03cff311cad075401791ee2a23583
|
d8490c5f557dd7005badeb800095cb51b553c98c
|
refs/heads/main
| 2023-08-26T06:47:23.837997 | 2023-08-17T22:35:13 | 2023-08-17T22:35:13 | 185,478,926 | 1,361 | 668 |
Apache-2.0
| 2023-09-14T20:48:40 | 2019-05-07T21:13:30 |
Python
|
UTF-8
|
Python
| false | false | 6,760 |
pyi
|
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import typing
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor = ...
class AnyValue(google.protobuf.message.Message):
"""AnyValue is used to represent any type of attribute value. AnyValue may contain a
primitive value such as a string or integer or it may contain an arbitrary nested
object containing arrays, key-value lists and primitives.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
STRING_VALUE_FIELD_NUMBER: builtins.int
BOOL_VALUE_FIELD_NUMBER: builtins.int
INT_VALUE_FIELD_NUMBER: builtins.int
DOUBLE_VALUE_FIELD_NUMBER: builtins.int
ARRAY_VALUE_FIELD_NUMBER: builtins.int
KVLIST_VALUE_FIELD_NUMBER: builtins.int
BYTES_VALUE_FIELD_NUMBER: builtins.int
string_value: typing.Text = ...
bool_value: builtins.bool = ...
int_value: builtins.int = ...
double_value: builtins.float = ...
@property
def array_value(self) -> global___ArrayValue: ...
@property
def kvlist_value(self) -> global___KeyValueList: ...
bytes_value: builtins.bytes = ...
def __init__(self,
*,
string_value : typing.Text = ...,
bool_value : builtins.bool = ...,
int_value : builtins.int = ...,
double_value : builtins.float = ...,
array_value : typing.Optional[global___ArrayValue] = ...,
kvlist_value : typing.Optional[global___KeyValueList] = ...,
bytes_value : builtins.bytes = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["array_value",b"array_value","bool_value",b"bool_value","bytes_value",b"bytes_value","double_value",b"double_value","int_value",b"int_value","kvlist_value",b"kvlist_value","string_value",b"string_value","value",b"value"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["array_value",b"array_value","bool_value",b"bool_value","bytes_value",b"bytes_value","double_value",b"double_value","int_value",b"int_value","kvlist_value",b"kvlist_value","string_value",b"string_value","value",b"value"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["string_value","bool_value","int_value","double_value","array_value","kvlist_value","bytes_value"]]: ...
global___AnyValue = AnyValue
class ArrayValue(google.protobuf.message.Message):
"""ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
since oneof in AnyValue does not allow repeated fields.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
VALUES_FIELD_NUMBER: builtins.int
@property
def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AnyValue]:
"""Array of values. The array may be empty (contain 0 elements)."""
pass
def __init__(self,
*,
values : typing.Optional[typing.Iterable[global___AnyValue]] = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["values",b"values"]) -> None: ...
global___ArrayValue = ArrayValue
class KeyValueList(google.protobuf.message.Message):
"""KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
are semantically equivalent.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
VALUES_FIELD_NUMBER: builtins.int
@property
def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]:
"""A collection of key/value pairs of key-value pairs. The list may be empty (may
contain 0 elements).
The keys MUST be unique (it is not allowed to have more than one
value with the same key).
"""
pass
def __init__(self,
*,
values : typing.Optional[typing.Iterable[global___KeyValue]] = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["values",b"values"]) -> None: ...
global___KeyValueList = KeyValueList
class KeyValue(google.protobuf.message.Message):
"""KeyValue is a key-value pair that is used to store Span attributes, Link
attributes, etc.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
KEY_FIELD_NUMBER: builtins.int
VALUE_FIELD_NUMBER: builtins.int
key: typing.Text = ...
@property
def value(self) -> global___AnyValue: ...
def __init__(self,
*,
key : typing.Text = ...,
value : typing.Optional[global___AnyValue] = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ...
global___KeyValue = KeyValue
class InstrumentationScope(google.protobuf.message.Message):
"""InstrumentationScope is a message representing the instrumentation scope information
such as the fully qualified name and version.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
NAME_FIELD_NUMBER: builtins.int
VERSION_FIELD_NUMBER: builtins.int
ATTRIBUTES_FIELD_NUMBER: builtins.int
DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int
name: typing.Text = ...
"""An empty instrumentation scope name means the name is unknown."""
version: typing.Text = ...
@property
def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]:
"""Additional attributes that describe the scope. [Optional].
Attribute keys MUST be unique (it is not allowed to have more than one
attribute with the same key).
"""
pass
dropped_attributes_count: builtins.int = ...
def __init__(self,
*,
name : typing.Text = ...,
version : typing.Text = ...,
attributes : typing.Optional[typing.Iterable[global___KeyValue]] = ...,
dropped_attributes_count : builtins.int = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["attributes",b"attributes","dropped_attributes_count",b"dropped_attributes_count","name",b"name","version",b"version"]) -> None: ...
global___InstrumentationScope = InstrumentationScope
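# Usage sketch (illustrative only; this stub just types the generated
# module):
#     from opentelemetry.proto.common.v1 import common_pb2
#     kv = common_pb2.KeyValue(
#         key="service.name",
#         value=common_pb2.AnyValue(string_value="demo"))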
|
[
"[email protected]"
] | |
97563f2780dada381587ccbf25d41f2ad572c094
|
07eaef75c6bc0066d56a8810711e82b0e8b01dda
|
/options/option_generation/MOMI.py
|
45e9f0749ce75b147a73a712866dff4b238e63c4
|
[] |
no_license
|
jinnaiyuu/Optimal-Options-ICML-2019
|
eb10da610d8ad7828f364c1bdb2e058aa35e7d65
|
4f5cd1776b47f9b16c1022d22b2cc91d6044775b
|
refs/heads/master
| 2021-06-21T06:54:41.968578 | 2021-02-18T09:14:09 | 2021-02-18T09:14:09 | 186,303,067 | 13 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 902 |
py
|
#!/usr/bin/env python
# Libraries
import numpy as np
from options.graph.set_cover import SC_APPROX, SC_APPROX2, SC_OPT
from options.util import DeriveGraph
def MOMI(mdp, distance, l, solver):
X = DeriveGraph(distance, l - 1) + np.identity(distance.shape[0])
# Remove states which is already reachable within l steps
xg = []
for s in range(X.shape[0]):
if all(X[s] <= l):
xg.append(s)
if solver == 'chvatal':
print("MOMI(l =", l, ", chvatal)")
C = SC_APPROX2(X.transpose())
elif solver == 'hochbaum':
print("MOMI(l =", l, ", hochbaum)")
C = SC_APPROX(X)
elif solver == 'optimal':
print("MOMI(l =", l, ", OPT)")
C = SC_OPT(X.transpose())
else:
        print('unknown solver for set cover', solver)
assert(False)
exit(0)
return C
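
# Usage sketch (hypothetical inputs): `distance` is a pairwise
# shortest-path matrix over states; MOMI(mdp, distance, l, 'chvatal')
# returns a set cover of states whose induced options make every state
# reachable within l steps.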
if __name__ == "__main__":
pass
|
[
"[email protected]"
] | |
dc5637e6a701297a732340cf01ffba977ba1acbd
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_26468.py
|
fec5d7144e7bb1fae1f38711170b9a3d48a18a18
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 114 |
py
|
# Add new lines to CSV file at each iteration in Python
with open("test.csv","a") as f:
f.write("name\n")
|
[
"[email protected]"
] | |
46c77cb564c9a110aa518a527e32b9cf01996707
|
35d16ac49032083cafbc8304aebaf462d5346808
|
/server/utils.py
|
156d104b0b830c9816696397e17076e04be03ac4
|
[
"MIT"
] |
permissive
|
panuta/wealth-crawler-advanced
|
48c601b29c505f1f31d48a98bbf60c8032136232
|
3b1bfb7f5efd9080514fa40ecdc1325f02f1a78f
|
refs/heads/master
| 2021-04-06T19:31:24.255296 | 2018-03-15T14:56:36 | 2018-03-15T14:56:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,248 |
py
|
import sys
def load_module_from_file(module_name, filepath, sys_path=None):
if sys_path:
sys.path.insert(0, sys_path)
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, filepath)
cls = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cls)
if sys_path:
sys.path.remove(sys_path)
return cls
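# Usage sketch (the path and attribute are hypothetical):
#   settings = load_module_from_file('settings', '/app/conf/settings.py')
#   print(settings.DEBUG)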
def datetime_to_str(dt):
return dt.strftime('%Y-%m-%dT%H:%M')
def merge_dict(existing_dict, new_dict):
for config_name, config_value in new_dict.items():
existing_dict[config_name] = config_value
return existing_dict
def crontab_hour_to_utc(crontab_hour, timezone):
import re
rebuild_hour_items = []
for hour_item in re.split(r'([-,])', crontab_hour):
if hour_item in ['-', ',']:
rebuild_hour_items.append(hour_item)
else:
try:
hour_num = int(hour_item)
except ValueError:
# Error, return original
return crontab_hour
utc_hour = hour_num - timezone
if utc_hour < 0:
utc_hour = utc_hour + 24
rebuild_hour_items.append(str(utc_hour))
return ''.join(rebuild_hour_items)
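# Worked example for timezone UTC+7:
#   crontab_hour_to_utc('8-10,22', 7)  ->  '1-3,15'
# (8-7=1, 10-7=3, 22-7=15; hours below 0 wrap around by adding 24)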
|
[
"[email protected]"
] | |
40d2aa50fc726880a476008150bc6877e48c24c7
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/automation/v20190601/connection.py
|
2bd99df22a5db0bff546750ad492b5bfbeb13c0d
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 |
Apache-2.0
| 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null |
UTF-8
|
Python
| false | false | 13,887 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ConnectionArgs', 'Connection']
@pulumi.input_type
class ConnectionArgs:
def __init__(__self__, *,
automation_account_name: pulumi.Input[str],
connection_type: pulumi.Input['ConnectionTypeAssociationPropertyArgs'],
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
connection_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
field_definition_values: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Connection resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account.
:param pulumi.Input['ConnectionTypeAssociationPropertyArgs'] connection_type: Gets or sets the connectionType of the connection.
:param pulumi.Input[str] name: Gets or sets the name of the connection.
:param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
:param pulumi.Input[str] connection_name: The parameters supplied to the create or update connection operation.
:param pulumi.Input[str] description: Gets or sets the description of the connection.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] field_definition_values: Gets or sets the field definition properties of the connection.
"""
pulumi.set(__self__, "automation_account_name", automation_account_name)
pulumi.set(__self__, "connection_type", connection_type)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if connection_name is not None:
pulumi.set(__self__, "connection_name", connection_name)
if description is not None:
pulumi.set(__self__, "description", description)
if field_definition_values is not None:
pulumi.set(__self__, "field_definition_values", field_definition_values)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> pulumi.Input[str]:
"""
The name of the automation account.
"""
return pulumi.get(self, "automation_account_name")
@automation_account_name.setter
def automation_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "automation_account_name", value)
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> pulumi.Input['ConnectionTypeAssociationPropertyArgs']:
"""
Gets or sets the connectionType of the connection.
"""
return pulumi.get(self, "connection_type")
@connection_type.setter
def connection_type(self, value: pulumi.Input['ConnectionTypeAssociationPropertyArgs']):
pulumi.set(self, "connection_type", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Gets or sets the name of the connection.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of an Azure Resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="connectionName")
def connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The parameters supplied to the create or update connection operation.
"""
return pulumi.get(self, "connection_name")
@connection_name.setter
def connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Gets or sets the description of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="fieldDefinitionValues")
def field_definition_values(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Gets or sets the field definition properties of the connection.
"""
return pulumi.get(self, "field_definition_values")
@field_definition_values.setter
def field_definition_values(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "field_definition_values", value)
class Connection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
connection_type: Optional[pulumi.Input[pulumi.InputType['ConnectionTypeAssociationPropertyArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
field_definition_values: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Definition of the connection.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account.
:param pulumi.Input[str] connection_name: The parameters supplied to the create or update connection operation.
:param pulumi.Input[pulumi.InputType['ConnectionTypeAssociationPropertyArgs']] connection_type: Gets or sets the connectionType of the connection.
:param pulumi.Input[str] description: Gets or sets the description of the connection.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] field_definition_values: Gets or sets the field definition properties of the connection.
:param pulumi.Input[str] name: Gets or sets the name of the connection.
:param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of the connection.
:param str resource_name: The name of the resource.
:param ConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
connection_type: Optional[pulumi.Input[pulumi.InputType['ConnectionTypeAssociationPropertyArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
field_definition_values: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConnectionArgs.__new__(ConnectionArgs)
if automation_account_name is None and not opts.urn:
raise TypeError("Missing required property 'automation_account_name'")
__props__.__dict__["automation_account_name"] = automation_account_name
__props__.__dict__["connection_name"] = connection_name
if connection_type is None and not opts.urn:
raise TypeError("Missing required property 'connection_type'")
__props__.__dict__["connection_type"] = connection_type
__props__.__dict__["description"] = description
__props__.__dict__["field_definition_values"] = field_definition_values
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["creation_time"] = None
__props__.__dict__["last_modified_time"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:automation/v20190601:Connection"), pulumi.Alias(type_="azure-native:automation:Connection"), pulumi.Alias(type_="azure-nextgen:automation:Connection"), pulumi.Alias(type_="azure-native:automation/v20151031:Connection"), pulumi.Alias(type_="azure-nextgen:automation/v20151031:Connection"), pulumi.Alias(type_="azure-native:automation/v20200113preview:Connection"), pulumi.Alias(type_="azure-nextgen:automation/v20200113preview:Connection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Connection, __self__).__init__(
'azure-native:automation/v20190601:Connection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Connection':
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ConnectionArgs.__new__(ConnectionArgs)
__props__.__dict__["connection_type"] = None
__props__.__dict__["creation_time"] = None
__props__.__dict__["description"] = None
__props__.__dict__["field_definition_values"] = None
__props__.__dict__["last_modified_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return Connection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> pulumi.Output[Optional['outputs.ConnectionTypeAssociationPropertyResponse']]:
"""
Gets or sets the connectionType of the connection.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> pulumi.Output[str]:
"""
Gets the creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets the description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="fieldDefinitionValues")
def field_definition_values(self) -> pulumi.Output[Mapping[str, str]]:
"""
Gets the field definition values of the connection.
"""
return pulumi.get(self, "field_definition_values")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> pulumi.Output[str]:
"""
Gets the last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
|
[
"[email protected]"
] | |
8ed83e1c7ee7d3368da4cd31284945c72863762f
|
40b27bdd261a0d8a9e100bc4e83c9f76b9ef710e
|
/contests/ABC1-100/ABC100/d.py
|
68f44a57690db03c79955432af5eba4426879e22
|
[] |
no_license
|
kouma1990/AtCoder-code-collection
|
486d612ae1def6df49f4aa3632e06aae7ff73d2f
|
a3040a6025b43fb7dd3522945dce05a2626a92aa
|
refs/heads/master
| 2020-04-16T22:42:39.023009 | 2019-08-29T07:05:43 | 2019-08-29T07:05:43 | 165,980,129 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
n, m = (int(i) for i in input().split())
xyz = [[int(i) for i in input().split()] for i in range(n)]
res = 0
for i in [-1, 1]:
for j in [-1, 1]:
for k in [-1, 1]:
l = []
for x,y,z in xyz:
l.append(x*i + y*j + z*k)
res = max(res, abs(sum(sorted(l, reverse=True)[:m])))
print(res)
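# Why the 8 sign combinations suffice: |a| + |b| + |c| equals
# max over (i,j,k) in {-1,1}^3 of (i*a + j*b + k*c), so maximising
# sum(i*x + j*y + k*z) over every sign choice and greedily keeping the
# m largest transformed values covers every possible optimum.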
|
[
"[email protected]"
] | |
b5e452698c80343c60fc868829630680ebdc41e0
|
d3440843f0b3ed85a41e1697ed9862d50b763056
|
/8.Regression/test.py
|
32267e2fb85c94b891cb888205bae41c09a44ead
|
[] |
no_license
|
keepingoner/ml
|
6f2d800b9e37a6324b2e2e10edd9d64b1bad6fb2
|
0b0091f08c1f77ec0fd176aa3375ada4153d8732
|
refs/heads/master
| 2020-04-02T16:11:10.616674 | 2018-11-05T05:47:01 | 2018-11-05T05:47:01 | 154,601,937 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,429 |
py
|
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.model_selection import GridSearchCV
data = pd.read_csv('8.Advertising.csv')
x = data[['TV', 'Radio', 'Newspaper']]
y = data['Sales']
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
# model = Lasso()
model = Ridge()
alpha_can = np.logspace(-3, 2, 10)
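# np.logspace(-3, 2, 10) gives 10 candidate regularisation strengths,
# log-spaced between 1e-3 and 1e2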
print(alpha_can)
lasso_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5)
lasso_model.fit(x, y)  # note: the grid search is fit on the full data, so x_test below is not a true hold-out
print('Best params:\n', lasso_model.best_params_)
y_hat = lasso_model.predict(np.array(x_test))
mse = np.average((y_hat - np.array(y_test)) ** 2)  # Mean Squared Error
rmse = np.sqrt(mse)  # Root Mean Squared Error
print(mse, rmse)
# Prediction score (R^2)
print("Prediction score (R^2): {}".format(lasso_model.score(x_test, y_test)))
# Best cross-validation score
print("Best CV score: {}".format(lasso_model.best_score_))
# Best estimator
print("Best estimator: {}".format(lasso_model.best_estimator_))
# Validation results for each candidate alpha
print("CV results for each alpha: {}".format(lasso_model.cv_results_))
t = np.arange(len(x_test))
plt.plot(t, y_test, 'r-', linewidth=2, label='Test')
plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict')
plt.legend(loc='upper right')
plt.grid()
plt.show()
|
[
"[email protected]"
] | |
f4aeca722de031bfba81b683aa5645494895f05c
|
01d982d22d214265eeb7a00b2b8bdd8c869d9064
|
/tests/test_invest.py
|
316b3bc47753e1d729297ad3a606d0768ec3a132
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
hkotaro1215/invest
|
ad6874ea1a9ac73813292fb88c138d13279988b5
|
1ba08bd746977bfa8a4600ad8c821fc43598c421
|
refs/heads/master
| 2022-11-12T06:06:22.826122 | 2018-03-26T21:08:18 | 2018-03-26T21:08:18 | 142,378,565 | 0 | 1 |
NOASSERTION
| 2022-10-15T06:47:29 | 2018-07-26T02:36:20 |
Python
|
UTF-8
|
Python
| false | false | 2,321 |
py
|
"""General InVEST tests."""
import unittest
import os
class FileRegistryTests(unittest.TestCase):
"""Tests for the InVEST file registry builder."""
def test_build_file_registry_duplicate_paths(self):
"""InVEST test that file registry recognizes duplicate paths."""
from natcap.invest import utils
with self.assertRaises(ValueError):
utils.build_file_registry(
[({'a': 'a.tif'}, ''), ({'b': 'a.tif'}, '')], '')
def test_build_file_registry_duplicate_keys(self):
"""InVEST test that file registry recognizes duplicate keys."""
from natcap.invest import utils
with self.assertRaises(ValueError):
utils.build_file_registry(
[({'a': 'a.tif'}, ''), ({'a': 'b.tif'}, '')], '')
def test_build_file_registry(self):
"""InVEST test a complicated file registry creation."""
from natcap.invest import utils
dict_a = {
'a': 'aggregated_results.shp',
'b': 'P.tif',
'': 'CN.tif',
'l_avail_path': ''}
dict_b = {
'apple': '.shp',
'bear': 'tif',
'cat': 'CN.tif'}
dict_c = {}
result = utils.build_file_registry(
[(dict_a, ''), (dict_b, 'foo'), (dict_c, 'garbage')], '')
expected_dict = {
'a': 'aggregated_results.shp',
'b': 'P.tif',
'': 'CN.tif',
'l_avail_path': '',
'apple': os.path.join('foo', '.shp'),
'bear': os.path.join('foo', 'tif'),
'cat': os.path.join('foo', 'CN.tif'),
}
unexpected_paths = []
for key, result_path in expected_dict.iteritems():
expected_path = os.path.normpath(result[key])
if os.path.normpath(result_path) != expected_path:
unexpected_paths.append(
(key, expected_path, os.path.normpath(result_path)))
extra_keys = set(result.keys()).difference(set(expected_dict.keys()))
if len(unexpected_paths) > 0 or len(extra_keys) > 0:
raise AssertionError(
"Unexpected paths or keys: %s %s" % (
str(unexpected_paths), str(extra_keys)))
|
[
"[email protected]"
] | |
7e80b0a0be78686787eaafec4793b508eea9b27d
|
b332e9e5b63db27b23250ddbbb85b470ceaf92a1
|
/List/minSwaps.py
|
e7f6e4eead0c4705df276774356d0ebc8d20dc2a
|
[] |
no_license
|
huangketsudou/leetcode_python
|
66fcc695b0a4f94a35cc52e161ae4bfdb1138dc2
|
e983f42d245b69f9bddd9855f51ee59648a2039e
|
refs/heads/master
| 2021-08-07T23:25:45.532458 | 2020-08-23T06:15:22 | 2020-08-23T06:15:22 | 214,324,229 | 2 | 0 | null | 2020-04-12T14:40:47 | 2019-10-11T02:16:43 |
Python
|
UTF-8
|
Python
| false | false | 973 |
py
|
from typing import List
class Solution:
def minSwaps(self, grid: List[List[int]]) -> int:
if not grid: return 0
        # Count, for each row, the number of trailing zeros (scanning right to left)
n = len(grid)
zero_nums = []
for i in range(n):
j = n - 1
while j >= 0 and grid[i][j] == 0: j -= 1
zero_nums.append(n - 1 - j)
        # Greedy: scan top-down for the smallest index whose row has enough trailing zeros; its distance is the number of swaps needed to bring it to the current row
cnt = 0
for i in range(n - 1):
need_zeros = n - 1 - i
j = i
while j < len(zero_nums) and zero_nums[j] < need_zeros: j += 1
            # Not found: the requirement cannot be satisfied
if j == len(zero_nums): return -1
            # Accumulate the swap count
cnt += j - i
            # Bubble the chosen row up by adjacent swaps
while j > i:
zero_nums[j], zero_nums[j-1]= zero_nums[j-1], zero_nums[j]
j -= 1
return cnt
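# e.g. for grid = [[0,0,1],[1,1,0],[1,0,0]] (LeetCode 1536, example 1) the
# trailing-zero counts are [0, 1, 2]; bringing the row with >= 2 zeros to the
# top costs 2 swaps and the next row 1 more, giving 3.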
|
[
"[email protected]"
] | |
f99682103fd1863b63d36bb3fd3f33ba90d0dd06
|
d4224cb20c48933909fc2a1834b75f4f062bd3c9
|
/google_or_tools/who_killed_agatha.py
|
4003b896e3136b99551cfd6911f118f9296d3f80
|
[] |
no_license
|
ajgappmark/hakank
|
dfe3255fd9d0bcdeb2e3eef7ad68d3428b0dc9f2
|
7c4265d109cfc3f1bf379c1140d434ccf537f982
|
refs/heads/master
| 2020-05-18T10:33:07.592353 | 2014-08-17T19:34:39 | 2014-08-17T19:34:39 | 23,218,111 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,967 |
py
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Who killed agatha? (The Dreadsbury Mansion Murder Mystery) in Google CP Solver.
This is a standard benchmark for theorem proving.
http://www.lsv.ens-cachan.fr/~goubault/H1.dist/H1.1/Doc/h1003.html
'''
Someone in Dreadsbury Mansion killed Aunt Agatha.
Agatha, the butler, and Charles live in Dreadsbury Mansion, and
are the only ones to live there. A killer always hates, and is no
richer than his victim. Charles hates noone that Agatha hates. Agatha
hates everybody except the butler. The butler hates everyone not richer
than Aunt Agatha. The butler hates everyone whom Agatha hates.
Noone hates everyone. Who killed Agatha?
'''
Originally from F. J. Pelletier:
Seventy-five problems for testing automatic theorem provers.
Journal of Automated Reasoning, 2: 216, 1986.
Note1: Since Google CP Solver/Pythons (currently) don't have
special support for logical operations on decision
variables (i.e. ->, <->, and, or, etc), this model
use some IP modeling tricks.
Note2: There are 8 different solutions, all stating that Agatha
killed herself
Compare with the following models:
* Choco : http://www.hakank.org/choco/WhoKilledAgatha.java
* Choco : http://www.hakank.org/choco/WhoKilledAgatha_element.java
* Comet : http://www.hakank.org/comet/who_killed_agatha.co
* ECLiPSE : http://www.hakank.org/eclipse/who_killed_agatha.ecl
* Gecode : http://www.hakank.org/gecode/who_killed_agatha.cpp
* JaCoP : http://www.hakank.org/JaCoP/WhoKilledAgatha.java
* JaCoP : http://www.hakank.org/JaCoP/WhoKilledAgatha_element.java
* MiniZinc: http://www.hakank.org/minizinc/who_killed_agatha.mzn
* Tailor/Essence': http://www.hakank.org/tailor/who_killed_agatha.eprime
* SICStus : http://hakank.org/sicstus/who_killed_agatha.pl
* Zinc :http://hakank.org/minizinc/who_killed_agatha.zinc
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/
"""
from collections import defaultdict
from constraint_solver import pywrapcp
def var_matrix_array(solver, rows, cols, lb, ub, name):
x = []
for i in range(rows):
t = []
for j in range(cols):
t.append(solver.IntVar(lb, ub, '%s[%i,%i]'%(name, i,j)))
x.append(t)
return x
def flatten_matrix(solver, m, rows, cols):
return [m[i][j] for i in range(rows) for j in range(cols)]
def print_flat_matrix(m_flat, rows, cols):
for i in range(rows):
for j in range(cols):
print m_flat[i*cols+j].Value(),
print
print
def main(the_killers):
# Create the solver.
solver = pywrapcp.Solver('Who killed agatha?')
#
# data
#
n = 3
agatha = 0
butler = 1
charles = 2
#
# declare variables
#
the_killer = solver.IntVar(0,2, 'the_killer')
the_victim = solver.IntVar(0,2, 'the_victim' )
hates = var_matrix_array(solver, n, n, 0, 1, 'hates')
richer = var_matrix_array(solver, n, n, 0, 1, 'richer')
hates_flat = flatten_matrix(solver, hates, n, n)
richer_flat = flatten_matrix(solver, richer, n, n)
#
# constraints
#
# Agatha, the butler, and Charles live in Dreadsbury Mansion, and
# are the only ones to live there.
# A killer always hates, and is no richer than his victim.
# solver.Add(hates[the_killer, the_victim] == 1)
solver.Add(solver.Element(hates_flat,the_killer*n+the_victim) == 1)
# solver.Add(richer[the_killer, the_victim] == 0)
solver.Add(solver.Element(richer_flat,the_killer*n+the_victim) == 0)
# define the concept of richer: no one is richer than him-/herself
for i in range(n):
solver.Add(richer[i][i] == 0)
# (contd...) if i is richer than j then j is not richer than i
# (i != j) => (richer[i,j] = 1) <=> (richer[j,i] = 0),
for i in range(n):
for j in range(n):
if i != j:
solver.Add((richer[i][j] == 1) == (richer[j][i] == 0))
# Charles hates noone that Agatha hates.
#forall i : Range .
# (hates[agatha, i] = 1) => (hates[charles, i] = 0),
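  # Implication trick: on 0/1 expressions, solver.Add(A <= B) enforces A -> B,
  # since A=1 with B=0 is the only assignment it forbids.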
for i in range(n):
solver.Add((hates[agatha][i]==1) <= (hates[charles][i] == 0))
# Agatha hates everybody except the butler.
solver.Add(hates[agatha][charles] == 1)
solver.Add(hates[agatha][agatha] == 1)
solver.Add(hates[agatha][butler] == 0)
# The butler hates everyone not richer than Aunt Agatha.
# forall i : Range .
# (richer[i, agatha] = 0) => (hates[butler, i] = 1),
for i in range(n):
solver.Add((richer[i][agatha]==0) <= (hates[butler][i]==1))
# The butler hates everyone whom Agatha hates.
#forall i : Range .
# (hates[agatha, i] = 1) => (hates[butler, i] = 1),
for i in range(n):
solver.Add((hates[agatha][i]==1) <= (hates[butler][i]==1))
# Noone hates everyone.
# forall i : Range .
# (sum j : Range . hates[i,j]) <= 2,
for i in range(n):
solver.Add(solver.Sum([hates[i][j] for j in range(n)]) <= 2)
# Who killed Agatha?
solver.Add(the_victim == agatha)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(the_killer)
solution.Add(the_victim)
solution.Add(hates_flat)
solution.Add(richer_flat)
# db: DecisionBuilder
db = solver.Phase(hates_flat + richer_flat,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print "the_killer:", the_killer.Value()
the_killers[the_killer.Value()] += 1
print "the_victim:", the_victim.Value()
print "hates:"
print_flat_matrix(hates_flat,n,n)
print "richer:"
print_flat_matrix(richer_flat,n,n)
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
the_killers = defaultdict(int)
p = ["agatha", "butler", "charles"]
if __name__ == '__main__':
main(the_killers)
print "\n"
for k in the_killers:
print "the killer %s was choosen in %i solutions" % (p[k], the_killers[k])
|
[
"[email protected]"
] | |
3490eb215b290ce4f27cdec5797fab3a54c5595b
|
9d30115d59ed821a5c7aecf2318b5e0ed22c9676
|
/src/codewars/python/6kyu/alphabet_position.py
|
ea394f02b1c84aeb367855d718532ac9ca2829c7
|
[] |
no_license
|
garigari-kun/til
|
02c7bf05274d1077b454e1f7d4a7355849441524
|
b71f36a66045ab7da7f4a97f7e18de2aaa05f493
|
refs/heads/master
| 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
"""
Welcome.
In this kata you are required to, given a string, replace every letter with its position in the alphabet.
If anything in the text isn't a letter, ignore it and don't return it.
a being 1, b being 2, etc.
As an example:
alphabet_position("The sunset sets at twelve o' clock.")
Should return "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11" as a string.
"""
import string
def alphabet_position(text):
alphabets = list(string.ascii_letters)
pos_list = []
for ch in text:
if ch in alphabets:
pos = alphabets.index(ch.lower())
            # list indices start at 0, so add 1 to get the 1-based alphabet position
pos_list.append(str(pos+1))
return ' '.join(pos_list)
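# An equivalent arithmetic variant for ASCII input (no lookup list needed):
#   ' '.join(str(ord(c) - 96) for c in text.lower() if 'a' <= c <= 'z')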
if __name__ == '__main__':
print(alphabet_position("The sunset sets at twelve o' clock."))
|
[
"[email protected]"
] | |
7cdc9598584590dacb3d6b8a0f07716b5b178462
|
97a09265d7898765a3f561c1b4a12e5b46346db8
|
/30DaysOfCode/day27_testing.py
|
cdd71b0cc82b218eaa507470534ddd6b8be43b44
|
[] |
no_license
|
14E47/Hackerrank
|
35e7b5520fe00ae98377624b8429d42d237cbd46
|
c2af2fa7ee49c2a94304ee543900425f5a3b6551
|
refs/heads/master
| 2020-03-26T21:22:45.492630 | 2019-10-04T03:37:14 | 2019-10-04T03:37:14 | 145,384,365 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
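# e.g. minimum_index([5, 7, 2, 4]) == 2; ties keep the first occurrence,
# as exercised by TestDataExactlyTwoDifferentMinimums below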
class TestDataEmptyArray(object):
@staticmethod
def get_array():
# complete this function
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
# complete this function
return [5, 7, 2, 4]
@staticmethod
def get_expected_result():
# complete this function
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
# complete this function
return [5, 4, 2, 3, 2, 7]
@staticmethod
def get_expected_result():
# complete this function
return 2
|
[
"[email protected]"
] | |
7ab151207135bb1d3da3bcd2c20b4b0233a5da8d
|
e821f62aead9a6a4911435224ecf3ff9ccb2be96
|
/CNN/tok.py
|
76f589c72efc2394d98d31273775f77d2e6868c5
|
[] |
no_license
|
dagrawa2/toxic_comments
|
799bcaabf8d8bf461fd5a011e2fc124379d021ea
|
aaccdc3184b48ff6086093a70cda9bbd20ff0f02
|
refs/heads/master
| 2022-12-28T23:03:03.139883 | 2018-04-29T21:00:00 | 2018-04-29T21:00:00 | 302,200,948 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 871 |
py
|
import pickle
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
num_words = 10000
data = pd.read_csv("../Data/train.csv")
comments = data["comment_text"].tolist()
T = Tokenizer(num_words=num_words)
T.fit_on_texts(comments)
X_train = T.texts_to_sequences(comments)
with open("Objects/X_train.list", "wb") as fp:
pickle.dump(X_train, fp)
Y_train = data.iloc[:,2:].as_matrix()  # note: as_matrix() was removed in pandas 1.0; .to_numpy() is the modern equivalent
np.save("Objects/Y_train.npy", Y_train)
data = pd.read_csv("../Data/test.csv")
comments = data["comment_text"].tolist()
X_test = T.texts_to_sequences(comments)
with open("Objects/X_test.list", "wb") as fp:
pickle.dump(X_test, fp)
with open("Objects/T.tok", "wb") as fp:
pickle.dump(T, fp)
labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
with open("Objects/labels.list", "wb") as fp:
pickle.dump(labels, fp)
|
[
"[email protected]"
] | |
8722b14a680985dc607c5e90cf3a7732dc440d27
|
f8d2521a88e465eed01adc3981c7a173d5c2554b
|
/round/round0401-0425/round0408/a1.py
|
74f23d763b4d58d544c619ea8758163784228ab0
|
[] |
no_license
|
clarinet758/codeforces
|
b2a8a349bba40e7761a8ce50dd5ff9a57477b60d
|
d79870c47bdb109547891a0d076dd173d6d647cf
|
refs/heads/main
| 2021-12-15T05:46:51.000160 | 2021-12-01T12:01:33 | 2021-12-01T12:01:33 | 41,968,658 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
#!/usr/bin/env pypy3
# -*- coding: UTF-8 -*-
n,m,k=map(int,input().split())
a=[int(i) for i in input().split()]
ans=100
# nearest suitable value (0 < a[i] <= k) to the right of position m-1 (0-indexed)
for x,i in enumerate(range(m,n,1)):
    if a[i]>0 and a[i]<=k:
        ans=x+1
        break
# nearest suitable value to the left of position m-1
for y,j in enumerate(range(m-2,-1,-1)):
    if a[j]>0 and a[j]<=k:
        ans=min(ans,y+1)
        break
# each step between neighbouring positions counts for 10 in the answer
print(ans*10)
|
[
"[email protected]"
] | |
230a5484a1735e92d8f44f8d59512c0924044e05
|
6bf4867b690f59a77f7caddc1238c3bae6b3e1c3
|
/rally/benchmark/scenarios/sahara/utils.py
|
c0acb424077e0f750aafd32e32798f1cb9f58cd6
|
[
"Apache-2.0"
] |
permissive
|
kambiz-aghaiepour/rally
|
641c044cc24c10eb15e4d6b4ab3bc4885779e076
|
be708bacf0bc898a9538b9b6cb0ba4e1c015c1f2
|
refs/heads/master
| 2021-01-15T19:35:15.318291 | 2014-08-18T23:51:30 | 2014-08-18T23:51:30 | 23,090,342 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,610 |
py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from saharaclient.api import base as sahara_base
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
from rally.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CREATE_CLUSTER_OPTS = [
cfg.IntOpt("cluster_create_timeout", default=600,
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("cluster_check_interval", default=5,
help="Cluster status polling interval in seconds")
]
benchmark_group = cfg.OptGroup(name='benchmark', title='benchmark options')
CONF.register_opts(CREATE_CLUSTER_OPTS, group=benchmark_group)
class SaharaScenario(base.Scenario):
RESOURCE_NAME_LENGTH = 20
# TODO(nkonovalov): Add other provisioning plugins
NODE_PROCESSES = {
"vanilla": {
"1.2.1": {
"master": ["namenode", "jobtracker"],
"worker": ["datanode", "tasktracker"]
},
"2.3.0": {
"master": ["namenode", "resourcemanager", "historyserver"],
"worker": ["datanode", "nodemanager"]
}
}
}
REPLICATION_CONFIGS = {
"vanilla": {
"1.2.1": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.3.0": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
@base.atomic_action_timer('sahara.list_node_group_templates')
def _list_node_group_templates(self):
"""Returns user Node Group Templates list."""
return self.clients("sahara").node_group_templates.list()
@base.atomic_action_timer('sahara.create_master_node_group_template')
def _create_master_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Creates a master Node Group Template with a random name.
:param flavor_id: The required argument for the Template
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:return: The created Template
"""
name = self._generate_random_name(prefix="master-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
["master"])
@base.atomic_action_timer('sahara.create_worker_node_group_template')
def _create_worker_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Creates a worker Node Group Template with a random name.
:param flavor_id: The required argument for the Template
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:return: The created Template
"""
name = self._generate_random_name(prefix="worker-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
["worker"])
@base.atomic_action_timer('sahara.delete_node_group_template')
def _delete_node_group_template(self, node_group):
"""Deletes a Node Group Template by id.
:param node_group: The Node Group Template to be deleted
:return:
"""
self.clients("sahara").node_group_templates.delete(node_group.id)
@base.atomic_action_timer('sahara.launch_cluster')
def _launch_cluster(self, plugin_name, hadoop_version, flavor_id,
image_id, node_count):
"""Creates a cluster and wait until it becomes Active.
The cluster is created with two node groups. The master Node Group is
created with one instance. The worker node group contains
node_count - 1 instances.
:param plugin_name: The provisioning plugin name
:param hadoop_version: Hadoop version supported by the plugin
:param flavor_id: The flavor which will be used to create instances
:param image_id: The image id that will be used to boot instances
:param node_count: The total number of instances. 1 master node, others
for the workers
:return: The created cluster
"""
node_groups = [
{
"name": "master-ng",
"flavor_id": flavor_id,
"node_processes": self.NODE_PROCESSES[plugin_name]
[hadoop_version]["master"],
"count": 1
}, {
"name": "worker-ng",
"flavor_id": flavor_id,
"node_processes": self.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"],
"count": node_count - 1
}
]
name = self._generate_random_name(prefix="sahara-cluster-")
replication_value = min(node_count - 1, 3)
# 3 is a default Hadoop replication
conf = self.REPLICATION_CONFIGS[plugin_name][hadoop_version]
LOG.debug("Using replication factor: %s" % replication_value)
cluster_object = self.clients("sahara").clusters.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
node_groups=node_groups,
default_image_id=image_id,
cluster_configs={conf["target"]: {
conf["config_name"]: replication_value}
}
)
def is_active(cluster_id):
return self.clients("sahara").clusters.get(
cluster_id).status.lower() == "active"
bench_utils.wait_for(
resource=cluster_object.id, is_ready=is_active,
timeout=CONF.benchmark.cluster_create_timeout,
check_interval=CONF.benchmark.cluster_check_interval)
return self.clients("sahara").clusters.get(cluster_object.id)
@base.atomic_action_timer('sahara.delete_cluster')
def _delete_cluster(self, cluster):
"""Calls a Cluster delete by id and waits for complete deletion.
:param cluster: The Cluster to be deleted
:return:
"""
self.clients("sahara").clusters.delete(cluster.id)
def is_deleted(cl_id):
try:
self.clients("sahara").clusters.get(cl_id)
return False
except sahara_base.APIException:
return True
bench_utils.wait_for(resource=cluster.id, is_ready=is_deleted)
|
[
"[email protected]"
] | |
f6b334de835c54ff274cbcae4e9c5b5691a10e1e
|
45f93a9d47204d76b8bf25a71dfb79403e75c33c
|
/Threading/long-running-thread.py
|
3222c862dc2c304516b29971b5c97a2bdb95d5e9
|
[] |
no_license
|
tahmid-tanzim/problem-solving
|
0173bce1973ac3e95441a76c10324c0e1b0a57c3
|
6ddb51de6772130f209474e76f39ca2938f444f0
|
refs/heads/master
| 2023-06-25T02:18:03.690263 | 2023-06-20T06:58:46 | 2023-06-20T06:58:46 | 137,173,850 | 4 | 1 | null | 2022-03-30T08:28:41 | 2018-06-13T06:44:25 |
Python
|
UTF-8
|
Python
| false | false | 756 |
py
|
from threading import Timer
import time
def heartbeat_tick():
current_time = time.strftime("%H:%M:%S", time.localtime())
print('im ok ' + current_time)
def heartbeat_tick2():
current_time = time.strftime("%H:%M:%S", time.localtime())
print('im very good ' + current_time)
class RepeatingTimer(Timer):
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
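# Note: RepeatingTimer reuses Timer's `finished` Event, so t.cancel() both
# wakes the wait() early and makes is_set() true, ending the loop cleanly.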
if __name__ == '__main__':
t1 = RepeatingTimer(20, heartbeat_tick)
    t1.start()  # every 20 seconds, call heartbeat_tick
t2 = RepeatingTimer(5, heartbeat_tick2)
    t2.start()  # every 5 seconds, call heartbeat_tick2
# later
# t.cancel() # cancels execution
|
[
"[email protected]"
] | |
90c2f1b800d86a24a43491b61d150a6366b6aa65
|
2940f5416082dadd9c646cd9a46d2d0a99883efb
|
/venv/Lib/site-packages/pandas/core/indexes/range.py
|
ec896d94a20ba0ae3f77f67639f062cda407c696
|
[
"MIT"
] |
permissive
|
tpike3/SugarScape
|
4813e4fefbfb0a701f5913d74f045fd0eaed1942
|
39efe4007fba2b12b75c72f7795827a1f74d640b
|
refs/heads/main
| 2021-06-20T03:55:46.288721 | 2021-01-20T17:06:35 | 2021-01-20T17:06:35 | 168,583,530 | 11 | 3 |
MIT
| 2021-01-20T17:19:53 | 2019-01-31T19:29:40 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 29,648 |
py
|
from datetime import timedelta
import operator
from sys import getsizeof
from typing import Any, List, Optional, Tuple
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_list_like,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import _index_shared_docs, maybe_extract_name
from pandas.core.indexes.numeric import Float64Index, Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
_empty_range = range(0)
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None
):
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
start = start._range
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex":
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Label = None) -> "RangeIndex":
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result.name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self):
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@cache_readonly
def start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@cache_readonly
def stop(self):
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self):
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@cache_readonly
def step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
@property
def has_duplicates(self) -> bool:
return False
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if com.any_not_none(method, tolerance, limit) or not is_list_like(target):
return super().get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
if not (is_signed_integer_dtype(target_array) and target_array.ndim == 1):
# checks/conversions/roundings are delegated to general method
return super().get_indexer(target, method=method, tolerance=tolerance)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def tolist(self):
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name
if values is not None:
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
result = self._simple_new(self._range, name=name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(self, name=None, deep=False, dtype=None, names=None):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._shallow_copy(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def factorize(
self, sort: bool = False, na_sentinel: Optional[int] = -1
) -> Tuple[np.ndarray, "RangeIndex"]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
        a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
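        # Worked example: _extended_gcd(6, 4) -> (2, 1, -1),
        # since 6*1 + 4*(-1) == 2 == gcd(6, 4)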
def _union(self, other, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if not len(other) or self.equals(other) or not len(self):
return super()._union(other, sort=sort)
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super().difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self._shallow_copy(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We wont end up with RangeIndex, so fall back
return super().difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super().difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super().difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
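        # Illustrative behavior (a sketch): removing a prefix or suffix keeps a
        # RangeIndex, e.g. RangeIndex(10).difference(RangeIndex(3)) -> RangeIndex(3, 10);
        # removing a middle chunk is not range-like and defers to the superclass.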
def symmetric_difference(self, other, result_name=None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
@doc(Int64Index.join)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
if how == "outer" and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers, sort)
return super().join(other, how, level, return_indexers, sort)
def _concat(self, indexes, name):
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in indexes if len(obj)]
for obj in non_empty_indexes:
rng: range = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self.name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
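        # Illustrative behavior (a sketch): slice keys preserve the type, e.g.
        # RangeIndex(10)[2:5] -> RangeIndex(start=2, stop=5, step=1), while an
        # integer key returns the scalar element, e.g. RangeIndex(10)[3] -> 3.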
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
            if len(self) == 0 or (self.start % other == 0 and self.step % other == 0):
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
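        # Illustrative behavior (a sketch): RangeIndex(0, 10, 2) // 2
        # -> RangeIndex(start=0, stop=5, step=1), since both start and step
        # divide evenly; otherwise the division defers to Int64Index.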
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
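        # Note: 0 is the only falsy integer, so RangeIndex(1, 5).all() is True
        # while RangeIndex(5).all() is False (that range contains 0).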
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step = False
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
other = extract_array(other, extract_numpy=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
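        # Illustrative behavior (a sketch): addition shifts the endpoints and
        # keeps the step, e.g. RangeIndex(0, 10, 2) + 3 -> RangeIndex(3, 13, 2),
        # while multiplication also scales the step, e.g.
        # RangeIndex(0, 10, 2) * 2 -> RangeIndex(0, 20, 4).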
# TODO: Do attrs get handled reliably?
|
[
"[email protected]"
] | |
d1659d658ee81928f513e875f7d9f2e78a75d540
|
4b7e282fe480415f5d52c0fc0429f144156190fe
|
/google/ads/googleads/v7/services/services/ad_group_service/transports/base.py
|
ce6b4b5bb906932a8cc9e5393ecafc79ed352104
|
[
"Apache-2.0"
] |
permissive
|
Z2Xsoft/google-ads-python
|
c4750357bb19da91bb3b6bf2fa84bef9d2df36d3
|
1779d52a0446c8afb2437b0a9e103dcb849f5590
|
refs/heads/main
| 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 |
Apache-2.0
| 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null |
UTF-8
|
Python
| false | false | 3,978 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v7.resources.types import ad_group
from google.ads.googleads.v7.services.types import ad_group_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group: gapic_v1.method.wrap_method(
self.get_ad_group,
default_timeout=None,
client_info=client_info,
),
self.mutate_ad_groups: gapic_v1.method.wrap_method(
self.mutate_ad_groups,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_ad_group(
self,
) -> typing.Callable[
[ad_group_service.GetAdGroupRequest], ad_group.AdGroup
]:
raise NotImplementedError
@property
def mutate_ad_groups(
self,
) -> typing.Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
raise NotImplementedError
__all__ = ("AdGroupServiceTransport",)
|
[
"[email protected]"
] | |
2a557af36bca6b8dc27982b2f226933bc64feca2
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/8/sQ3.py
|
f1249399cdc60251785d96d9fdff6bf4b63c27e5
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'sQ3':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
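# Example input line (hypothetical): a file containing
#   sQ3 " hello world "
# prints "hello world"; the opening and closing quote tokens must stand
# alone (space-separated) to pass the quote check in printFunction.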
|
[
"[email protected]"
] | |
62d51938566b1760ee0a7773969750876072c711
|
a03b30ee77b49e19a72b647e984b98f878c2847a
|
/Anaconda-files/Program_1d.py
|
b629bba7ac959034d527e2162c17ccbb88a23184
|
[
"BSD-2-Clause"
] |
permissive
|
SSalaPla/dynamical-systems-with-applications-using-python
|
d47f46dfbe7195d2446cdee7f874cc3e4a5ab90a
|
c80582ae3559230d12e2aee15f94c465e367fdda
|
refs/heads/master
| 2021-05-03T16:00:31.561907 | 2018-02-05T15:16:13 | 2018-02-05T15:16:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
# Program 1d: Subplots.
# See Figure 1.15.
import matplotlib.pyplot as plt
import numpy as np
def f(t):
return np.exp(-t) * np.cos(2*np.pi*t)
t1=np.arange(0.0,5.0,0.1)
t2=np.arange(0.0,5.0,0.02)
plt.figure(1)
plt.subplot(211) #subplot(num rows,num cols,fig num)
plt.plot(t1,f(t1),'bo',t2,f(t2),'k',label='damping')
plt.xlabel('time (s)')
plt.ylabel('amplitude (m)')
plt.title('Damped pendulum')
legend = plt.legend(loc='upper center',shadow=True)
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2),'g--',linewidth=4)
plt.xlabel('time (s)')
plt.ylabel('amplitude (m)')
plt.title('Undamped pendulum')
plt.subplots_adjust(hspace=0.8)
plt.show()
|
[
"[email protected]"
] | |
1e0810823638b12185d64ebe70744a50b7bdcd48
|
717ae7ee216675ba0fb31358000dde3d2fc11c5c
|
/chart_of_accounts_builder/config/desktop.py
|
598a493fd9079f73d008fdb162af58221aa05684
|
[
"MIT"
] |
permissive
|
sihaysistema/chart_of_accounts_builder
|
7cf2bfb23eeb254d89b083dccd146fc60736eb9b
|
23a94ddbdae4a36c6d318e148c47e68a36eb177b
|
refs/heads/master
| 2020-05-31T21:16:47.958250 | 2019-06-06T15:31:48 | 2019-06-06T15:31:48 | 190,494,216 | 1 | 0 | null | 2019-06-06T01:35:37 | 2019-06-06T01:35:37 | null |
UTF-8
|
Python
| false | false | 281 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
"Chart Of Accounts Builder": {
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Chart Of Accounts Builder")
}
}
|
[
"[email protected]"
] | |
6fd9f40b3a240f53ce47870b44e87e64f7bffccf
|
c9273bbb39b2f9bade816ae0d4d57ba664f599c7
|
/setup.py
|
1858551b34353e1bcc19f247f13c75e79207cc58
|
[] |
no_license
|
alunduil/singularity
|
0eaefdbee20880146cd07fae7445387c16ab861b
|
600f864628743472226755ad0fe7a4c7a0d2ef28
|
refs/heads/master
| 2021-01-21T11:45:16.242749 | 2014-06-22T17:01:03 | 2014-06-22T17:01:03 | 5,211,203 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,874 |
py
|
# Copyright (C) 2012 by Alex Brandt <[email protected]>
#
# singularity is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import sys
from distutils.core import setup
try:
from singularity import information
from singularity import helpers
except ImportError:
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from singularity import information
from singularity import helpers
PARAMS = {}
PARAMS["name"] = information.NAME
PARAMS["version"] = information.VERSION
PARAMS["description"] = information.DESCRIPTION
PARAMS["long_description"] = information.LONG_DESCRIPTION
PARAMS["author"] = information.AUTHOR
PARAMS["author_email"] = information.AUTHOR_EMAIL
PARAMS["url"] = information.URL
PARAMS["license"] = information.LICENSE
PARAMS["scripts"] = [
"bin/singularity",
]
PARAMS["packages"] = [
"singularity",
"singularity.configurators",
"singularity.configurators.gentoo",
"singularity.communicators",
"singularity.helpers",
"singularity.parameters",
]
PARAMS["data_files"] = [
("share/doc/{P[name]}-{P[version]}".format(P = PARAMS), [
"README.md",
]),
("share/doc/{P[name]}-{P[version]}/config".format(P = PARAMS), [
"config/singularity.conf",
"config/init.gentoo",
]),
("share/man/man8", [
"doc/man/man8/singularity.8",
"doc/man/man8/singularity-apply.8",
"doc/man/man8/singularity-daemon.8",
]),
("share/man/man5", [
"doc/man/man5/singularity.conf.5",
]),
]
PARAMS["requires"] = [
"daemon",
"Crypto",
]
if helpers.VIRTUAL == "xenU":
PARAMS["requires"].append("xen")
setup(**PARAMS)
|
[
"[email protected]"
] | |
36fe5a2469a8db223fcc5567527737d6653e366c
|
d488f052805a87b5c4b124ca93494bc9b78620f7
|
/google-cloud-sdk/.install/.backup/lib/surface/container/clusters/get_iam_policy.py
|
6e672f702942bf58fceb409a2804549e7be639d2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PacktPublishing/DevOps-Fundamentals
|
5ce1fc938db66b420691aa8106ecfb3f9ceb1ace
|
60597e831e08325c7e51e8557591917f7c417275
|
refs/heads/master
| 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,625 |
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for getting IAM policies for clusters."""
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class GetIAMPolicy(base.Command):
"""Get the IAM policy for a cluster."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
parser.add_argument('name', help='The name of this cluster.')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
adapter = self.context['api_adapter']
location_get = self.context['location_get']
location = location_get(args)
return adapter.GetIamPolicy(adapter.ParseCluster(args.name, location))
|
[
"[email protected]"
] | |
6161a504a113f3319583c244962a422646113b54
|
5bd1381e5515061b4fdd7284f80f89d0aad3c4e6
|
/www/unicooo/views.py
|
06970d5519d28f80bee6c5af73a4a63d6bec5113
|
[] |
no_license
|
Windsooon/Unicooo-django
|
1c1a7643151dffc15cea6ff94b9a80453d1fcfb2
|
7db3f2807bc4802b686f1a4d6bd6fd5b7436611b
|
refs/heads/master
| 2022-12-21T11:16:22.855365 | 2017-12-09T09:24:01 | 2017-12-09T09:24:01 | 47,067,358 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 195 |
py
|
from django.shortcuts import render
def front_page(request):
return render(request, "frontpage.html")
def public_activities(request):
return render(request, "public_activities.html")
|
[
"[email protected]"
] | |
705a4ea02f2b08df2528ae1bb2b39cb9b998c9bd
|
28ca060efe83304c6174c3411cd9105537adf6bd
|
/fs_image/rpm/tests/test_rpm_metadata.py
|
dba5130b5b6b441078f54df13cb8562ac0f85bf7
|
[
"MIT"
] |
permissive
|
singhaditya28/fs_image
|
6eee93a3663f36894f2e26efef9f2f961f11d76b
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
refs/heads/master
| 2022-09-25T04:52:58.206356 | 2020-06-05T18:27:39 | 2020-06-05T18:29:57 | 269,931,605 | 0 | 0 |
MIT
| 2020-06-06T09:24:29 | 2020-06-06T09:24:28 | null |
UTF-8
|
Python
| false | false | 5,792 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib.resources
import os
import re
import shutil
import unittest
from fs_image.fs_utils import temp_dir
from .temp_repos import get_test_signing_key, temp_repos_steps, Repo, Rpm
from ..rpm_metadata import compare_rpm_versions, RpmMetadata, _compare_values
from fs_image.find_built_subvol import find_built_subvol
class RpmMetadataTestCase(unittest.TestCase):
def _load_canonical_tests(self):
STMT = re.compile(
r"(.*)RPMVERCMP\(([^, ]*) *, *([^, ]*) *, *([^\)]*)\).*")
for line in importlib.resources.open_text(
'fs_image.rpm', 'version-compare-tests').readlines():
m = STMT.match(line)
if m:
yield m.group(2), m.group(3), int(m.group(4))
def test_rpm_metadata_from_subvol(self):
layer_path = os.path.join(os.path.dirname(__file__), 'child-layer')
child_subvol = find_built_subvol(layer_path)
a = RpmMetadata.from_subvol(child_subvol, 'rpm-test-mice')
self.assertEqual(a.name, 'rpm-test-mice')
self.assertEqual(a.epoch, 0)
self.assertEqual(a.version, '0.1')
self.assertEqual(a.release, 'a')
# not installed
with self.assertRaises(RuntimeError):
a = RpmMetadata.from_subvol(child_subvol, 'rpm-test-carrot')
# subvol with no RPM DB
layer_path = os.path.join(os.path.dirname(__file__), 'hello-layer')
hello_subvol = find_built_subvol(layer_path)
with self.assertRaisesRegex(ValueError, ' does not exist$'):
a = RpmMetadata.from_subvol(hello_subvol, 'rpm-test-mice')
def test_rpm_metadata_from_file(self):
with temp_repos_steps(
gpg_signing_key=get_test_signing_key(),
repo_change_steps=[{
'repo': Repo([Rpm('sheep', '0.3.5.beta', 'l33t.deadbeef.777')]),
}],
) as repos_root, temp_dir() as td:
src_rpm_path = repos_root / ('0/repo/repo-pkgs/' +
'rpm-test-sheep-0.3.5.beta-l33t.deadbeef.777.x86_64.rpm')
dst_rpm_path = td / 'arbitrary_unused_name.rpm'
shutil.copy(src_rpm_path, dst_rpm_path)
a = RpmMetadata.from_file(dst_rpm_path)
self.assertEqual(a.name, 'rpm-test-sheep')
self.assertEqual(a.epoch, 0)
self.assertEqual(a.version, '0.3.5.beta')
self.assertEqual(a.release, 'l33t.deadbeef.777')
# non-existent file
with self.assertRaisesRegex(RuntimeError, '^Error querying RPM:'):
a = RpmMetadata.from_file(b'idontexist.rpm')
# missing extension
with self.assertRaisesRegex(ValueError, ' needs to end with .rpm$'):
a = RpmMetadata.from_file(b'idontendwithdotrpm')
def test_rpm_query_arg_check(self):
with self.assertRaisesRegex(ValueError, '^Must pass only '):
RpmMetadata._repo_query(RpmMetadata, b"dbpath", None, b"path")
def test_rpm_compare_versions(self):
# name mismatch
a = RpmMetadata('test-name1', 1, '2', '3')
b = RpmMetadata('test-name2', 1, '2', '3')
with self.assertRaises(ValueError):
compare_rpm_versions(a, b)
        # Test data was generated with:
# rpmdev-vercmp <epoch1> <ver1> <release1> <epoch2> <ver2> <release2>
# which also uses the same Python rpm lib.
#
# This number of test cases is excessive but does show how interesting
# RPM version comparisons can be.
test_evr_data = [
# Non-alphanumeric (except ~) are ignored for equality
((1, '2', '3'), (1, '2', '3'), 0), # 1:2-3 == 1:2-3
((1, ':2>', '3'), (1, '-2-', '3'), 0), # 1::2>-3 == 1:-2--3
((1, '2', '3?'), (1, '2', '?3'), 0), # 1:2-?3 == 1:2-3?
# epoch takes precedence no matter what
((0, '2', '3'), (1, '2', '3'), -1), # 0:2-3 < 1:2-3
((1, '1', '3'), (0, '2', '3'), 1), # 1:1-3 > 0:2-3
# version and release trigger the real comparison rules
((0, '1', '3'), (0, '2', '3'), -1), # 0:1-3 < 0:2-3
((0, '~2', '3'), (0, '1', '3'), -1), # 0:~2-3 < 0:1-3
((0, '~', '3'), (0, '1', '3'), -1), # 0:~-3 < 0:1-3
((0, '1', '3'), (0, '~', '3'), 1), # 0:1-3 > 0:~-3
((0, '^1', '3'), (0, '^', '3'), 1), # 0:^1-3 > 0:^-3
((0, '^', '3'), (0, '^1', '3'), -1), # 0:^-3 < 0:^1-3
((0, '0333', 'b'), (0, '0033', 'b'), 1), # 0:0333-b > 0:0033-b
((0, '0033', 'b'), (0, '0333', 'b'), -1), # 0:0033-b < 0:0333-b
((0, '3', '~~'), (0, '3', '~~~'), 1), # 0:3-~~ > 0:3-~~~
((0, '3', '~~~'), (0, '3', '~~'), -1), # 0:3-~~~ < 0:3-~~
((0, '3', '~~~'), (0, '3', '~~~'), 0), # 0:3-~~~ == 0:3-~~~
((0, 'a2aa', 'b'), (0, 'a2a', 'b'), 1), # 0:a2aa-b > 0:a2a-b
((0, '33', 'b'), (0, 'aaa', 'b'), 1), # 0:33-b > 0:aaa-b
]
for evr1, evr2, expected in test_evr_data:
a = RpmMetadata('test-name', *evr1)
b = RpmMetadata('test-name', *evr2)
self.assertEqual(compare_rpm_versions(a, b),
expected, f'failed: {evr1}, {evr2}, {expected}')
# Test against some more canonical tests. These are derived from
# actual tests used for rpm itself.
for ver1, ver2, expected in self._load_canonical_tests():
self.assertEqual(_compare_values(ver1, ver2),
expected, f'failed: {ver1}, {ver2}, {expected}')
|
[
"[email protected]"
] | |
0f7f483f687b8b4897064f7ac52ff924a951cd9d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_321/ch14_2020_03_02_17_36_10_914758.py
|
e0c3dc8afc80fcd598ed32def9202927e6ac0a30
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 159 |
py
|
import math
def calcula_distancia_do_projetil(v, o, y):
    # Horizontal range of a projectile launched with speed v (m/s) at angle o
    # (radians) from an initial height y (m), with g = 9.8 m/s^2:
    #   d = (v^2 / (2g)) * (1 + sqrt(1 + 2gy / (v^2 sin^2 o))) * sin(2o)
    g = 9.8
    return (math.pow(v, 2) / (2 * g)) * (
        1 + math.sqrt(1 + (2 * g * y) / (math.pow(v, 2) * math.sin(o) ** 2))
    ) * math.sin(2 * o)
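# Minimal sanity check (hypothetical values): 10 m/s at 45 degrees (pi/4 rad)
# launched from 2 m above the ground.
if __name__ == '__main__':
    print(calcula_distancia_do_projetil(10, math.pi / 4, 2))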
|
[
"[email protected]"
] | |
2af32f7a4d9ae29e4db70a69549fc1bbab5cd4ac
|
b59f66a9c4b5492b95c767b7ca76cd026f6f572a
|
/aac/metrics/rouge_l.py
|
33b0b9c2183a7a51bf1d84b3ce3d73b265416c3c
|
[] |
no_license
|
Labbeti/dcase2021task6
|
b50f51370af15c241bd9f257920e2df4bc925669
|
2e792749bd9b2a495fa4b870f6190f6fb389fc56
|
refs/heads/main
| 2023-06-11T07:10:50.179348 | 2021-07-05T09:28:11 | 2021-07-05T09:28:11 | 377,414,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,048 |
py
|
from rouge_metric import PyRouge
from torch.nn import Module
class RougeL(Module):
def __init__(self):
"""
        Recall-Oriented Understudy for Gisting Evaluation (ROUGE-L).
        Output values are in the range [0, 1]. Higher is better.
        Uses the 'rouge-metric' package as backend.
Original paper: https://www.aclweb.org/anthology/W04-1013.pdf
"""
super().__init__()
self.rouge = PyRouge(rouge_l=True)
def forward(self, hypothesis: list[list[str]], references: list[list[list[str]]]) -> float:
if len(hypothesis) != len(references):
raise ValueError(f'Number of hypothesis and references are different ({len(hypothesis)} != {len(references)}).')
hypothesis_join = [' '.join(hyp) for hyp in hypothesis]
references_join = [[' '.join(ref) for ref in refs] for refs in references]
scores = self.rouge.evaluate(hypotheses=hypothesis_join, multi_references=references_join)
rouge_l_scores = scores['rouge-l']
# 3 scores = Recall r, Precision p, FScore f
# {'r': ..., 'p': ..., 'f': ...}
f_score = rouge_l_scores['f']
return f_score
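        # Hedged usage sketch (token lists, matching the forward() signature
        # above; values are illustrative, not computed):
        #   metric = RougeL()
        #   score = metric([['a', 'cat', 'sits']], [[['a', 'cat', 'is', 'sitting']]])
        #   # -> ROUGE-L F-score in [0, 1]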
|
[
"[email protected]"
] |