blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3 to 616 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0 to 112 items) | license_type (2 classes) | repo_name (string, 5 to 115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (188 classes) | content (string, 3 to 10.2M chars) | authors (sequence, length 1) | author_id (string, 1 to 132 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
66cef5cbd17f68249077184c9a1078b3126a3213 | dce4a52986ddccea91fbf937bd89e0ae00b9d046 | /jni-build/jni/include/tensorflow/python/training/server_lib_test.py | f72e96f5af4b264e6fbcdc962ecc0d3a0e21838e | [
"MIT"
] | permissive | Lab603/PicEncyclopedias | 54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6 | 6d39eeb66c63a6f0f7895befc588c9eb1dd105f9 | refs/heads/master | 2022-11-11T13:35:32.781340 | 2018-03-15T05:53:07 | 2018-03-15T05:53:07 | 103,941,664 | 6 | 3 | MIT | 2022-10-28T05:31:37 | 2017-09-18T13:20:47 | C++ | UTF-8 | Python | false | false | 14,369 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class GrpcServerTest(tf.test.TestCase):
def testRunStep(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testMultipleSessions(self):
server = tf.train.Server.create_local_server()
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
# Verifies behavior of multiple variables with multiple sessions connecting to
# the same server.
def testSameVariablesNoClear(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess_1:
v0 = tf.Variable([[2, 1]], name="v0")
v1 = tf.Variable([[1], [2]], name="v1")
v2 = tf.matmul(v0, v1)
sess_1.run([v0.initializer, v1.initializer])
self.assertAllEqual([[4]], sess_1.run(v2))
with tf.Session(server.target) as sess_2:
new_v0 = tf.get_default_graph().get_tensor_by_name("v0:0")
new_v1 = tf.get_default_graph().get_tensor_by_name("v1:0")
new_v2 = tf.matmul(new_v0, new_v1)
self.assertAllEqual([[4]], sess_2.run(new_v2))
# Verifies behavior of tf.Session.reset().
def testSameVariablesClear(self):
server = tf.train.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = tf.Variable([[2, 1]], name="v0")
v1 = tf.Variable([[1], [2]], name="v1")
v2 = tf.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
sess_1.run(tf.initialize_all_variables())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
# Resets target. sessions abort. Use sess_2 to verify.
tf.Session.reset(server.target)
with self.assertRaises(tf.errors.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = tf.Session(server.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(tf.initialize_all_variables())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
# Verifies behavior of tf.Session.reset() with multiple containers using
# default container names as defined by the target name.
def testSameVariablesClearContainer(self):
# Starts two servers with different names so they map to different
# resource "containers".
server0 = tf.train.Server({"local0": ["localhost:0"]}, protocol="grpc",
start=True)
server1 = tf.train.Server({"local1": ["localhost:0"]}, protocol="grpc",
start=True)
# Creates a graph with 2 variables.
v0 = tf.Variable(1.0, name="v0")
v1 = tf.Variable(2.0, name="v0")
# Initializes the variables. Verifies that the values are correct.
sess_0 = tf.Session(server0.target)
sess_1 = tf.Session(server1.target)
sess_0.run(v0.initializer)
sess_1.run(v1.initializer)
self.assertAllEqual(1.0, sess_0.run(v0))
self.assertAllEqual(2.0, sess_1.run(v1))
# Resets container "local0". Verifies that v0 is no longer initialized.
tf.Session.reset(server0.target, ["local0"])
sess = tf.Session(server0.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v0)
# Reinitializes v0 for the following test.
sess.run(v0.initializer)
# Verifies that v1 is still valid.
self.assertAllEqual(2.0, sess_1.run(v1))
# Resets container "local1". Verifies that v1 is no longer initialized.
tf.Session.reset(server1.target, ["local1"])
sess = tf.Session(server1.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v1)
# Verifies that v0 is still valid.
sess = tf.Session(server0.target)
self.assertAllEqual(1.0, sess.run(v0))
# Verifies behavior of tf.Session.reset() with multiple containers using
# tf.container.
def testMultipleContainers(self):
with tf.container("test0"):
v0 = tf.Variable(1.0, name="v0")
with tf.container("test1"):
v1 = tf.Variable(2.0, name="v0")
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
sess.run(tf.initialize_all_variables())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# Resets container. Session aborts.
tf.Session.reset(server.target, ["test0"])
with self.assertRaises(tf.errors.AbortedError):
sess.run(v1)
# Connects to the same target. Device memory for v0 would have
# been released, so it will be uninitialized. But v1 should still
# be valid.
sess = tf.Session(server.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v0)
self.assertAllEqual(2.0, sess.run(v1))
# Verifies various reset failures.
def testResetFails(self):
# Creates variable with container name.
with tf.container("test0"):
v0 = tf.Variable(1.0, name="v0")
# Creates variable with default container.
v1 = tf.Variable(2.0, name="v1")
# Verifies resetting the non-existent target returns error.
with self.assertRaises(tf.errors.NotFoundError):
tf.Session.reset("nonexistent", ["test0"])
# Verifies resetting with config.
# Verifies that resetting target with no server times out.
with self.assertRaises(tf.errors.DeadlineExceededError):
tf.Session.reset("grpc://localhost:0", ["test0"],
config=tf.ConfigProto(operation_timeout_in_ms=5))
# Verifies no containers are reset with non-existent container.
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
sess.run(tf.initialize_all_variables())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# No container is reset, but the server is reset.
tf.Session.reset(server.target, ["test1"])
# Verifies that both variables are still valid.
sess = tf.Session(server.target)
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
def testLargeConstant(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = tf.constant(const_val)
shape_t = tf.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = tf.placeholder(tf.float32, shape=[10000, 3000])
min_t = tf.reduce_min(p)
max_t = tf.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testCloseCancelsBlockingOperation(self):
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
q = tf.FIFOQueue(10, [tf.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaises(tf.errors.CancelledError):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testSetConfiguration(self):
config = tf.ConfigProto(
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.1))
# Configure a server using the default local server options.
server = tf.train.Server.create_local_server(config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config
.gpu_options.per_process_gpu_memory_fraction)
# Configure a server using an explicit ServerDef with an
# overridden config.
cluster_def = tf.train.ClusterSpec(
{"localhost": ["localhost:0"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="localhost", task_index=0,
protocol="grpc")
server = tf.train.Server(server_def, config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config
.gpu_options.per_process_gpu_memory_fraction)
def testInvalidHostname(self):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "port"):
_ = tf.train.Server({"local": ["localhost"]},
job_name="local",
task_index=0)
def testInteractiveSession(self):
server = tf.train.Server.create_local_server()
# TODO(b/29900832): Remove this assertion when the bug is fixed.
a = tf.constant(1.0)
with self.assertRaisesRegexp(tf.errors.UnimplementedError, "pruned"):
sess = tf.InteractiveSession(target=server.target)
sess.run(a)
# TODO(b/29900832): The following code fails (without the unimplemented
# check in `tensorflow::MasterSession`):
# a = tf.constant(1.0)
# b = tf.constant(2.0)
# self.assertEqual(1.0, sess.run(a))
# self.assertEqual(2.0, sess.run(b))
class ServerDefTest(tf.test.TestCase):
def testLocalServer(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222", "localhost:2223"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testClusterSpec(self):
cluster_spec = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
1d1f0142466d34eff9b75716ab2c1484f3656a7e | 50fb25631cdc03a868f09061e76f4dedf85f2d3f | /crawler_sys/site_crawler/crawler_sogou.py | a5329fd56d4cff00cd248b908d874e33674e74c5 | [] | no_license | silade/crawler | 20a88c0eb6471f79a5d5daf947dcbff681d11e6e | fbfe3c4feca8be61186aec986b600b36f513f7f4 | refs/heads/main | 2023-03-10T10:06:21.097103 | 2021-02-19T16:00:45 | 2021-02-19T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,663 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 09:30:20 2018
@author: fangyucheng
"""
import time
import requests
from bs4 import BeautifulSoup
from crawler.crawler_sys.utils.trans_strtime_to_timestamp import trans_strtime_to_timestamp
cookie = ('YYID=2FFBDAA6D4FBA37438F4067C8123E98B; IMEVER=8.5.0.1322;'
'SUID=3D03FF723865860A59795A5F000BB71F;'
'SUV=00C039A172FF033D5993ADBD770E7410; usid=lF0F7il0yWbXF5c9;'
'IPLOC=CN1100; sct=11; SMYUV=1512954490386200;'
'ad=19fxxkllll2zKxvnlllllVHr6$UllllltsDRlyllll9llllljgDll5@@@@@@@@@@;'
'SNUID=D0DE5A671A1E68C31FB628911B8277A5; wuid=AAGPcSphIAAAAAqLE2OSTQgAGwY=;'
'UM_distinctid=16449b02797449-0c5d9293f4a833-143f7040-1fa400-16449b02799881;'
'CXID=794EC592A14CE76F5DF3F3A3BDDDD787;'
'ld=Kyllllllll2bWX10QTIdJOHDsvSbWX1uK94Vhkllll9lllllVklll5@@@@@@@@@@;'
'cd=1534754086&17502a3f56c02f72dfd43a17cbb19663;'
'rd=Vyllllllll2bBEqoQLWCNCHfKv2bWX1uzX0atkllllwllllRVllll5@@@@@@@@@@;'
'LSTMV=173%2C72; LCLKINT=1570')
headers = {'Host': 'news.sogou.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Cookie': cookie,
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Cache-Control': 'max-age=0'}
def sogou_info_page(keyword):
result_lst = []
for page_num in range(1,11):
search_url = 'http://news.sogou.com/news?&query='+keyword+'&page='+str(page_num)
get_page = requests.get(search_url, headers=headers)
page = get_page.text
soup = BeautifulSoup(page, 'html.parser')
news_lst = soup.find_all('div', {'class': 'vrwrap'})
for line in news_lst:
try:
title = line.div.h3.a.text
url = line.div.h3.a['href']
source_and_release_time = line.find('p', {'class': 'news-from'}).text
source_and_release_time_lst = source_and_release_time.split('\xa0')
source = source_and_release_time_lst[0]
release_time_str = source_and_release_time_lst[-1]
release_time = trans_strtime_to_timestamp(release_time_str)
try:
content = line.find('span').text
except:
print('no content at %s' % title)
content = 'missing'
fetch_time = int(time.time()*1000)
try:
similar_news = line.find('a', {'id': 'news_similar'}).text
except:
print('no similar news at %s' % title)
similar_news = 'missing'
news_info = {'title': title,
'url': url,
'source': source,
'release_time': release_time,
'fetch_time': fetch_time,
'content': content,
'similar_news': similar_news,
'keyword': keyword}
result_lst.append(news_info)
print('get data at page %s' % page_num)
except:
            print('the error occurred at position %s' % news_lst.index(line))
return result_lst
if __name__=='__main__':
keyword = '中超'
test_sogou = sogou_info_page(keyword)
| [
"[email protected]"
] | |
b8b00509599fec72275e7b0df844db765b98d0f4 | bb313586d9894a6b1a985d2f8b0a1f8e62907481 | /videocap1.py | 3c449b3f6fcfcb91ea2de0cc70bd2b53eb3c944c | [] | no_license | HelloDivyani/OpenCV | c3538fc8e8985fcbb24bf951f16977b1d23e93a5 | 96fa8aa563393cfbb15913fd0df863c891d00717 | refs/heads/master | 2021-01-12T03:45:35.227275 | 2016-11-10T12:14:29 | 2016-11-10T12:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import numpy as np
import cv2
video_path = ""
cap = cv2.VideoCapture(video_path)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame',gray)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
75d5a74d1e78fae22fe705de6e87225c93e25cdc | c6f15aa103de030f7eea6c1aaf6e7ad0ec88dbc1 | /add/features/10_binmap/viewer/app/gui/MainFrame.py | fbff879a00f19d0ac8a16c8b54567d556f59fbf3 | [] | no_license | sysdeep/dcat | 6f3478348113b0d1206f82456f5bd80431282daf | f8c801173ace4447018c3034c56254ab1a6d4089 | refs/heads/master | 2023-05-03T16:04:28.027335 | 2023-04-17T15:04:04 | 2023-04-17T15:04:04 | 320,551,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QHBoxLayout, QVBoxLayout, QWidget, QGroupBox, QGridLayout
from app.shared import get_storage
from .VolumeInfo import VolumeInfo
from .explorer.Explorer import Explorer
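# MainFrame stacks the volume summary above the file explorer in a vertical layout.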
class MainFrame(QWidget):
def __init__(self, parent=None):
super(MainFrame, self).__init__(parent)
layout = QVBoxLayout()
self.setLayout(layout)
self.__volume_info = VolumeInfo()
self.__explorer = Explorer()
layout.addWidget(self.__volume_info)
layout.addWidget(self.__explorer)
def start(self):
storage = get_storage()
self.__volume_info.set_info(storage.volume.volume_header)
self.__explorer.show_root() | [
"[email protected]"
] | |
369ccaa36c9e88ee6f2b28a35a7d5edc5337d3e5 | eedde715576cbbc195c3f6049636b7115895b138 | /pandas_doit/pandas_doit_graph/pandas_doit_graphes.py | 8dbe02409cf10365758de802b50cd103399f2c54 | [] | no_license | azegun/workspace_python | e75de706c08edfe7c5c8d88e8b4679578d6aa68f | 8edf1683a57e468a2e665be0092bc516a28571fd | refs/heads/master | 2023-07-05T10:42:07.955389 | 2021-08-13T03:27:52 | 2021-08-13T03:27:52 | 388,668,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import seaborn as sns
import matplotlib.pyplot as plt
tips = sns.load_dataset('tips')
print(tips)
# 막대 그래프
fig = plt.figure()
axes1 = fig.add_subplot(1, 1, 1)
axes1.hist(tips['total_bill'], bins=10) # bins 지정시 x축의 간격을 10으로 조정
axes1.set_title('Histogram of Total Bill')
axes1.set_xlabel('Frequency')
axes1.set_ylabel('Total Bill')
# Scatter plot
scatter_plot = plt.figure()
axes1 = scatter_plot.add_subplot(1, 1, 1)
axes1.scatter(tips['total_bill'], tips['tip'])
axes1.set_title('Scatterplot of Total Bill Vs Tip')
axes1.set_xlabel('Total Bill')
axes1.set_ylabel('Tip')
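# Box plot of tips grouped by sex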
boxplot = plt.figure()
axes1 = boxplot.add_subplot(1, 1, 1)
axes1.boxplot([tips[tips['sex'] == 'Female']['tip'],
tips[tips['sex'] == 'Male']['tip']],
labels=['Female', 'Male'])
axes1.set_xlabel('Sex')
axes1.set_ylabel('Tip')
axes1.set_title('Boxplot of Tips by Sex')
plt.show()
| [
"[email protected]"
] | |
4497ef2a39a12ced95825c28aeb9e9f4ef2b860a | 45e34a88f9a70f92b17d9e037f7d719dacc94176 | /devel/lib/python2.7/dist-packages/opencv_apps/srv/_FaceRecognitionTrain.py | c4936fec476a88897b452fd98a7dc58589c2d3fc | [] | no_license | lipilian/ROS-RealSense | 7f9c40e056b8c9704eee7584cab1bef87874bb59 | df1a717fe78d7d994a7b743911ba4940dfc97c8a | refs/heads/master | 2020-12-15T05:41:06.900613 | 2020-02-09T22:38:07 | 2020-02-09T22:38:07 | 235,009,699 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,116 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from opencv_apps/FaceRecognitionTrainRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
import opencv_apps.msg
import sensor_msgs.msg
class FaceRecognitionTrainRequest(genpy.Message):
_md5sum = "ba188b4bf792edbaf69c7f296a16e0ec"
_type = "opencv_apps/FaceRecognitionTrainRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """sensor_msgs/Image[] images
Rect[] rects
string[] labels
================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of cameara
# +x should point to the right in the image
# +y should point down in the image
# +z should point into to plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict
# the behavior is undefined
uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns
# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# [email protected] and send an email proposing a new encoding.
string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in include/sensor_msgs/image_encodings.h
uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: opencv_apps/Rect
# opencv Rect data type, x-y is center point
float64 x
float64 y
float64 width
float64 height
"""
__slots__ = ['images','rects','labels']
_slot_types = ['sensor_msgs/Image[]','opencv_apps/Rect[]','string[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
images,rects,labels
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FaceRecognitionTrainRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.images is None:
self.images = []
if self.rects is None:
self.rects = []
if self.labels is None:
self.labels = []
else:
self.images = []
self.rects = []
self.labels = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.images)
buff.write(_struct_I.pack(length))
for val1 in self.images:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_2I().pack(_x.height, _x.width))
_x = val1.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))
_x = val1.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.rects)
buff.write(_struct_I.pack(length))
for val1 in self.rects:
_x = val1
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.width, _x.height))
length = len(self.labels)
buff.write(_struct_I.pack(length))
for val1 in self.labels:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.images is None:
self.images = None
if self.rects is None:
self.rects = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.images = []
for i in range(0, length):
val1 = sensor_msgs.msg.Image()
_v3 = val1.header
start = end
end += 4
(_v3.seq,) = _get_struct_I().unpack(str[start:end])
_v4 = _v3.stamp
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v3.frame_id = str[start:end].decode('utf-8')
else:
_v3.frame_id = str[start:end]
_x = val1
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.encoding = str[start:end].decode('utf-8')
else:
val1.encoding = str[start:end]
_x = val1
start = end
end += 5
(_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.data = str[start:end]
self.images.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.rects = []
for i in range(0, length):
val1 = opencv_apps.msg.Rect()
_x = val1
start = end
end += 32
(_x.x, _x.y, _x.width, _x.height,) = _get_struct_4d().unpack(str[start:end])
self.rects.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.labels = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.labels.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.images)
buff.write(_struct_I.pack(length))
for val1 in self.images:
_v5 = val1.header
buff.write(_get_struct_I().pack(_v5.seq))
_v6 = _v5.stamp
_x = _v6
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v5.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_2I().pack(_x.height, _x.width))
_x = val1.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))
_x = val1.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.rects)
buff.write(_struct_I.pack(length))
for val1 in self.rects:
_x = val1
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.width, _x.height))
length = len(self.labels)
buff.write(_struct_I.pack(length))
for val1 in self.labels:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.images is None:
self.images = None
if self.rects is None:
self.rects = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.images = []
for i in range(0, length):
val1 = sensor_msgs.msg.Image()
_v7 = val1.header
start = end
end += 4
(_v7.seq,) = _get_struct_I().unpack(str[start:end])
_v8 = _v7.stamp
_x = _v8
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v7.frame_id = str[start:end].decode('utf-8')
else:
_v7.frame_id = str[start:end]
_x = val1
start = end
end += 8
(_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.encoding = str[start:end].decode('utf-8')
else:
val1.encoding = str[start:end]
_x = val1
start = end
end += 5
(_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
val1.data = str[start:end]
self.images.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.rects = []
for i in range(0, length):
val1 = opencv_apps.msg.Rect()
_x = val1
start = end
end += 32
(_x.x, _x.y, _x.width, _x.height,) = _get_struct_4d().unpack(str[start:end])
self.rects.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.labels = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.labels.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_BI = None
def _get_struct_BI():
global _struct_BI
if _struct_BI is None:
_struct_BI = struct.Struct("<BI")
return _struct_BI
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from opencv_apps/FaceRecognitionTrainResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FaceRecognitionTrainResponse(genpy.Message):
_md5sum = "14d6fca830116fb9833d983a296f00ed"
_type = "opencv_apps/FaceRecognitionTrainResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool ok
string error
"""
__slots__ = ['ok','error']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
ok,error
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FaceRecognitionTrainResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.ok is None:
self.ok = False
if self.error is None:
self.error = ''
else:
self.ok = False
self.error = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.ok))
_x = self.error
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.ok,) = _get_struct_B().unpack(str[start:end])
self.ok = bool(self.ok)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.error = str[start:end].decode('utf-8')
else:
self.error = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.ok))
_x = self.error
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.ok,) = _get_struct_B().unpack(str[start:end])
self.ok = bool(self.ok)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.error = str[start:end].decode('utf-8')
else:
self.error = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class FaceRecognitionTrain(object):
_type = 'opencv_apps/FaceRecognitionTrain'
_md5sum = 'c47a3ceb75cbe248d69217439e66a8e2'
_request_class = FaceRecognitionTrainRequest
_response_class = FaceRecognitionTrainResponse
| [
"[email protected]"
] | |
fa6054203e5f3135b3d56a0bc695a47469cac9a5 | cb3bce599e657188c30366adb0af3007ff9b8f96 | /src/note/test_pachongbaidu.py | 47392a7ab5c0759490c9e2e271a9216663c6e7ea | [] | no_license | skk4/python_study | 534339e6c378d686c29af6d81429c472fca19d6d | 4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987 | refs/heads/master | 2021-01-01T04:36:52.037184 | 2017-12-08T01:04:27 | 2017-12-08T01:04:27 | 97,207,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | # -*- coding:utf-8 -*-
import urllib
import urllib2
import json
while 1:
content = raw_input(">:")
headers = {
'Referer': 'http://fanyi.baidu.com/?aldtype=16047/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
}
data = {}
data['from'] = 'en'
data['to'] = 'zh'
data['query'] = content
data['transtype'] = 'translang'
data['simple_means_flag'] = '3'
url = 'http://fanyi.baidu.com/v2transapi'
values = urllib.urlencode(data)
rq = urllib2.Request(url, values, headers)
fd = urllib2.urlopen(rq)
#print fd.getcode()
html = fd.read()
#print html
#print html
dst = json.loads(html)
print dst['trans_result']['data'][0]['dst']
| [
"[email protected]"
] | |
36e3c9586af6106c678d5bcac19e2ab7d0f50adc | 44e8334e1b17fda7f60d9760f59868a9227e2ab0 | /ML/ch10_9.py | a477bc9725a42a97d551bd6ada02ecd600ffca8d | [] | no_license | MysteriousSonOfGod/python-3 | 47c2aa69a84ba78876c74bc6f2e7e6f3093df1e2 | a303a5284c40f3cb96a8082a1f5ed80773b66336 | refs/heads/master | 2023-02-16T18:21:46.153388 | 2021-01-13T10:55:14 | 2021-01-13T10:55:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,853 | py | import pandas as pd
import numpy as np
import mglearn
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image
# 9. make_moons: a two-dimensional dataset with two classes
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
print("X.shape: {}".format(X.shape))
print("y.shape: {}".format(y.shape))
print("X 타입: {}".format(type(X)))
print("y 타입: {}".format(type(y)))
print(X[:5], y[:5])
###############################################################################
# 1. Evaluating clustering with ground-truth labels: metrics that compare the result of a
#    clustering algorithm against the true ("answer") clusters.
#    1. ARI (adjusted Rand index): 1 when the clustering is optimal, 0 when labels are assigned at random
#    2. NMI (normalized mutual information)
#
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
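# Standardize the features (zero mean, unit variance) before clustering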
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
fig, axes = plt.subplots(1, 4, figsize=(15, 3), subplot_kw={'xticks':(), 'yticks':()})
# the three clustering algorithms to compare
algos = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2), DBSCAN()]
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X))
# clusters assigned at random (used as a baseline)
from sklearn.metrics.cluster import adjusted_rand_score
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("random assign - ARI : {:.2f}".format(adjusted_rand_score(y, random_clusters)))
for ax, algo in zip(axes[1:], algos):
clusters = algo.fit_predict(X_scaled)
ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
ax.set_title("{} - ARI: {:.2f}".format(algo.__class__.__name__, adjusted_rand_score(y, clusters)))
# plt.title('복잡한 모양의 클러스터 군집 알고리즘 비교')
images.image.save_fig("10.9.moons_spiral_scatter_adjusted_rand_score")
plt.show()
# 2. Evaluating clustering without ground-truth labels - the silhouette coefficient.
# When applying a clustering algorithm there is usually no target value to compare the result against.
# The silhouette coefficient is a clustering metric that needs no target values,
# but in practice it does not work very well.
# The silhouette score measures how dense (compact) the clusters are; higher is better, with a maximum of 1.
# Compare the k-means, agglomerative clustering, and DBSCAN algorithms using the silhouette coefficient.
fig, axes = plt.subplots(1, 4, figsize=(15, 3), subplot_kw={'xticks':(), 'yticks':()})
# the three clustering algorithms to compare
# algos = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2), DBSCAN()]
# random_state = np.random.RandomState(seed=0)
# random_clusters = random_state.randint(low=0, high=2, size=len(X))
# clusters assigned at random (used as a baseline)
from sklearn.metrics.cluster import silhouette_score
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("random assign : {:.2f}".format(silhouette_score(X_scaled, random_clusters)))
for ax, algo in zip(axes[1:], algos):
clusters = algo.fit_predict(X_scaled)
ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters, cmap=mglearn.cm3, s=60, edgecolors='black')
ax.set_title("{} : {:.2f}".format(algo.__class__.__name__, silhouette_score(X_scaled, clusters)))
# plt.title('복잡한 모양의 클러스터 군집 알고리즘 비교')
images.image.save_fig("10.9.moons_spiral_scatter_silhouette_score")
plt.show()
| [
"[email protected]"
] | |
d2a3053fd64857bb28bf86e6ff1a7e69900c528b | 1ad12a71c3d5d2b3810ce03e8bd138c4ffb66eb8 | /xlsxwriter/test/comparison/test_chart_pie02.py | 772e70f53f3fe101ff7ea093d7ef19814e4f5983 | [
"BSD-2-Clause-Views"
] | permissive | idreamsfy/XlsxWriter | b52929229b16e2ee1eaca0cda9980a5a0aad5769 | 129044ed821de67895b4562c6b71f90eba5be6b4 | refs/heads/master | 2021-01-02T20:39:20.415882 | 2020-02-07T21:07:55 | 2020-02-07T21:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_pie02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$3',
'values': '=Sheet1!$B$1:$B$3',
})
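        # The legend font settings below are the behaviour this comparison test exercises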
chart.set_legend({'font': {'bold': 1, 'italic': 1, 'baseline': -1}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
bca4a0189f323184e4d9d842f593edc9890ec469 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02658/s377864583.py | a4bd4508fb86c22e9ea006ed431ef20ad6050566 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def main():
n = int(input())
a = list(map(int, input().split()))
ans = 1
if 0 in a:
print(0)
return
else:
flag = True
for i in a:
ans *= i
if ans > (10 ** 18):
print(-1)
return
print(ans)
main()
| [
"[email protected]"
] | |
a9f9d1d30ff078ad375c253215133f498c5895b9 | 9d29861e44389e02762e6eb0457c6a415a54e26c | /samples/images/export_task.py | 64e408194a49b511eccab70c08ad4f8b855b7f79 | [
"MIT"
] | permissive | itsmemattchung/pyrax | e787d67f8a79036834575f951f8c9e81d64d8b8f | e8eff127a5c9b6e64a9a42593d5e889c3c03f81d | refs/heads/master | 2021-01-18T10:14:31.752469 | 2015-05-16T16:44:35 | 2015-05-16T16:44:35 | 21,360,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
imgs = pyrax.images
cf = pyrax.cloudfiles
print("You will need to select an image to export, and a Container into which "
"the exported image will be placed.")
images = imgs.list(visibility="private")
print()
print("Select an image to export:")
for pos, image in enumerate(images):
print("[%s] %s" % (pos, image.name))
snum = raw_input("Enter the number of the image you want to share: ")
if not snum:
exit()
try:
num = int(snum)
except ValueError:
print("'%s' is not a valid number." % snum)
exit()
if not 0 <= num < len(images):
print("'%s' is not a valid image number." % snum)
exit()
image = images[num]
conts = cf.list()
print()
print("Select the target container to place the exported image:")
for pos, cont in enumerate(conts):
print("[%s] %s" % (pos, cont.name))
snum = raw_input("Enter the number of the container: ")
if not snum:
exit()
try:
num = int(snum)
except ValueError:
print("'%s' is not a valid number." % snum)
exit()
if not 0 <= num < len(conts):
print("'%s' is not a valid container number." % snum)
exit()
cont = conts[num]
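# Start the export task; the image will be written into the selected Cloud Files container.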
task = imgs.export_task(image, cont)
print("Task ID=%s" % task.id)
print()
answer = raw_input("Do you want to track the task until completion? This may "
"take several minutes. [y/N]: ")
if answer and answer[0].lower() == "y":
pyrax.utils.wait_until(task, "status", ["success", "failure"],
verbose=True, interval=30)
| [
"[email protected]"
] | |
86df25291145f18d5fc9e052389ac8390fba23ec | 5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba | /algorithm-study/baekjun/1927.py | 6029af1ee152bdfa3b91b592febe7a1fbf701b55 | [] | no_license | namujinju/study-note | 4271b4248b3c4ac1b96ef1da484d86569a030762 | 790b21e5318a326e434dc836f5f678a608037a8c | refs/heads/master | 2023-02-04T13:25:55.418896 | 2020-12-26T10:47:11 | 2020-12-26T10:47:11 | 275,279,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import heapq as hq
import sys
hq_arr = []
n = int(input()) # 연산 갯수
for _ in range(n):
i = int(sys.stdin.readline()) # https://www.acmicpc.net/blog/view/56
if i:
hq.heappush(hq_arr, i)
else:
if hq_arr:
print(hq.heappop(hq_arr))
else:
print(0)
| [
"[email protected]"
] | |
93f739b3253aa3891ea450f662d32d5466856a30 | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /learning_parameter_allocation/omniglot/omniglot_eval.py | 57302ece23bb7e33211ed04e68884facbf8e35a4 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 6,063 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation job for the Omniglot experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from learning_parameter_allocation import data
from learning_parameter_allocation import models
from learning_parameter_allocation import utils
from learning_parameter_allocation.pathnet import components as pn_components
from learning_parameter_allocation.pathnet import pathnet_lib as pn
from learning_parameter_allocation.pathnet.utils import create_uniform_layer
import tensorflow.compat.v1 as tf
_OMNIGLOT_INPUT_SHAPE = [105, 105, 1]
# Delay in seconds to wait before rechecking if there are new checkpoints.
_CHECK_FOR_CHECKPOINTS_FREQUENCY = 15
# If there are no checkpoints for this number of seconds give up and finish.
_MAX_WAIT_FOR_NEW_CHECKPOINTS = 3 * 60 * 60
FLAGS = flags.FLAGS
flags.DEFINE_string(
'logdir', '/tmp/summary_dir/',
'Path to the directory to save logs and summaries.')
flags.DEFINE_string(
'method', 'gumbel_matrix',
'Approach to use to determine which tasks gets which components, '
'one of "shared_bottom", "no_sharing", "gumbel_matrix".')
def loss_fn(labels, logits):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
def build_pathnet_eval_graph(
task_names, batch_size, num_classes_for_tasks, router_fn):
"""Constructs the PathNet eval graph.
Args:
task_names: (list of strings) names of tasks.
batch_size: (int) batch size to use.
num_classes_for_tasks: (list of ints) number of classes for each task.
router_fn: function that, given a single argument `num_components`, returns
a router (see routers in `pathnet/pathnet_lib.py`) for a layer containing
`num_components` components.
Returns:
A tuple of (`p_inputs`, `p_task_id`, `out_logits`). `p_inputs` and
`p_task_id` are placeholders for input image and scalar task id,
respectively. `out_logits` are the final network output (classification
logits).
"""
num_tasks = len(task_names)
# PathNet layers
keras_layers = models.get_keras_layers_for_omniglot_experiment()
pathnet_layers = models.build_model_from_keras_layers(
_OMNIGLOT_INPUT_SHAPE, num_tasks, keras_layers, router_fn)
# Task-specific linear heads
pathnet_layers.append(
utils.create_layer_with_task_specific_linear_heads(num_classes_for_tasks))
# Output components
pathnet_layers.append(create_uniform_layer(
num_components=num_tasks,
component_fn=lambda: pn_components.ModelHeadComponent(loss_fn=loss_fn),
combiner_fn=pn.SelectCombiner,
router_fn=lambda: None))
pathnet = pn.PathNet(
pathnet_layers, tf.contrib.training.HParams(batch_size=batch_size))
p_inputs, _, p_task_id, _, out_logits = utils.build_pathnet_graph(
pathnet, _OMNIGLOT_INPUT_SHAPE, training=False)
return p_inputs, p_task_id, out_logits
def main(_):
num_alphabets = 20
task_names = ['Omniglot-%d' % task_id for task_id in range(num_alphabets)]
task_data, num_classes = data.get_data_for_multitask_omniglot_setup(
num_alphabets)
batch_size = 16
for task_id in range(num_alphabets):
task_data[task_id] = data.batch_all(task_data[task_id], batch_size)
router_fn = utils.get_router_fn_by_name(num_alphabets, FLAGS.method)
session = tf.Session(graph=tf.get_default_graph())
tf.train.get_or_create_global_step()
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.logdir)
summary_writer.set_as_default()
tf.contrib.summary.initialize(session=session)
p_inputs, p_task_id, out_logits = build_pathnet_eval_graph(
task_names, batch_size, num_classes, router_fn)
evaluate_on = ['train', 'validation', 'test']
p_task_accuracies = {}
accuracy_summary_op = {}
for data_split in evaluate_on:
(p_task_accuracies[data_split], accuracy_summary_op[data_split]) =\
utils.create_accuracy_summary_ops(
task_names, summary_name_prefix='eval_%s' % data_split)
# This `Saver` is not used to save variables, only to restore them from
# the checkpoints.
saver = tf.train.Saver(tf.global_variables())
previous_checkpoint_path = ''
time_waited_for_checkpoints = 0
while time_waited_for_checkpoints < _MAX_WAIT_FOR_NEW_CHECKPOINTS:
latest_checkpoint_path = tf.train.latest_checkpoint(FLAGS.logdir)
if latest_checkpoint_path in [None, previous_checkpoint_path]:
print('Found no new checkpoints')
time_waited_for_checkpoints += _CHECK_FOR_CHECKPOINTS_FREQUENCY
time.sleep(_CHECK_FOR_CHECKPOINTS_FREQUENCY)
continue
else:
time_waited_for_checkpoints = 0
print('Reloading checkpoint: %s' % latest_checkpoint_path)
previous_checkpoint_path = latest_checkpoint_path
saver.restore(session, latest_checkpoint_path)
for data_split in evaluate_on:
eval_data = [
dataset[data_split].make_one_shot_iterator().get_next()
for dataset in task_data
]
print('Evaluating on: %s' % data_split)
task_accuracies = utils.run_pathnet_evaluation(
session, p_inputs, p_task_id, out_logits, task_names, eval_data)
utils.run_accuracy_summary_ops(
session,
p_task_accuracies[data_split],
task_accuracies,
accuracy_summary_op[data_split])
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
02d2173600e6ba222744b73a6ac1cf959bc7a32f | d96787f92bd86c8d8bcf01a4e7ec8f7feec24194 | /kattis/kutevi/solution.py | 9a687ed8e3f0ebe800bdb8b0ba3849c123defb8f | [] | no_license | iandioch/solutions | 133cbc3af58fadcde0b2e981fb0e7d05801070a7 | 8b3e458b3c01179ddf776bfbb897f263f22f3693 | refs/heads/master | 2023-04-09T03:39:16.952817 | 2023-03-15T20:00:53 | 2023-03-15T20:00:53 | 47,693,495 | 48 | 40 | null | 2019-10-22T14:52:59 | 2015-12-09T13:36:55 | Python | UTF-8 | Python | false | false | 526 | py | from collections import deque
n, m = map(int, input().split())
poss = [False for _ in range(360)]
given = list(map(int, input().split()))
q = deque()
q.append(given[0])
while len(q):
a = q.pop()
if poss[a]:
continue
poss[a] = True
for o in given:
b = abs(a - o)
if not poss[b]:
q.append(b)
c = (a+o)%360
if not poss[c]:
q.append(c)
for a in input().split():
ok = poss[int(a)]
if ok:
print('YES')
else:
print('NO')
| [
"[email protected]"
] | |
2005e615b5cc1e94cd02c98c46a797303e28e8b4 | 3ed50263057c1695330009f9f5b122e412e1c02f | /bn/distribs/multivariate_distribution.py | 353b2f69ef7384b8f6ede8151da65d51d4e14d4f | [
"MIT"
] | permissive | ppijbb/PyOpenDial | 5528aa584190dcf08b892ec92a5ce8c2b82eb845 | c9bca653c18ccc082dc8b86b4a8feee9ed00a75b | refs/heads/master | 2022-02-16T01:27:39.667661 | 2019-07-24T10:51:41 | 2019-07-24T10:51:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,027 | py | import abc
from multipledispatch import dispatch
from datastructs.assignment import Assignment
class MultivariateDistribution:
"""
Representation of a multivariate probability distribution P(X1,...Xn), where
X1,...Xn are random variables.
"""
__metaclass__ = abc.ABCMeta
@dispatch()
@abc.abstractmethod
def get_variables(self):
"""
Returns the names of the random variables in the distribution
:return: the set of variable names.
"""
raise NotImplementedError()
@dispatch()
@abc.abstractmethod
def get_values(self):
"""
Returns the set of possible assignments for the random variables.
:return: the set of possible assignment
"""
raise NotImplementedError()
@dispatch(Assignment)
@abc.abstractmethod
def get_prob(self, values):
"""
Returns the probability of a particular assignment of values.
:param values: the assignment of values to X1,...Xn.
:return: the corresponding probability
"""
raise NotImplementedError()
@dispatch()
@abc.abstractmethod
def sample(self):
"""
Returns a sample assignment for X1,...Xn.
:return: the sampled assignment
"""
raise NotImplementedError()
@dispatch(str)
@abc.abstractmethod
def get_marginal(self, variable):
"""
Returns the marginal probability distribution P(Xi) for a random variable Xi
in X1,...Xn.
:param variable: the random variable Xi
:return: the marginal distribution P(Xi)
"""
raise NotImplementedError()
@dispatch(str, str)
@abc.abstractmethod
def modify_variable_id(self, old_variable_id, new_variable_id):
"""
Modifies the variable identifier in the distribution
:param old_variable_id: the old identifier
:param new_variable_id: the new identifier
"""
raise NotImplementedError()
@dispatch()
@abc.abstractmethod
def to_discrete(self):
"""
Returns a representation of the distribution as a multivariate table.
:return: the multivariate table.
"""
raise NotImplementedError()
@abc.abstractmethod
def __copy__(self):
"""
Returns a copy of the distribution.
:return: the copy
"""
raise NotImplementedError()
@dispatch(float)
@abc.abstractmethod
def prune_values(self, threshold):
"""
Prunes all values assignment whose probability falls below the threshold.
:param threshold: the threshold to apply
:return: true if at least one value has been removed, false otherwise
"""
raise NotImplementedError()
@dispatch()
@abc.abstractmethod
def get_best(self):
"""
Returns the value with maximum probability.
:return: the value with maximum probability
"""
raise NotImplementedError()
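# Hypothetical usage sketch (illustrative only; `SomeConcreteDistribution` is not a real
# PyOpenDial class): any concrete subclass of this interface is expected to be used as
#
#     dist = SomeConcreteDistribution(...)   # e.g. a multivariate probability table
#     marginal = dist.get_marginal('X1')     # univariate distribution P(X1)
#     sampled = dist.sample()                # one Assignment over X1,...,Xn
#     best = dist.get_best()                 # most probable joint Assignment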
| [
"[email protected]"
] | |
bbcc0cc75155387e6ce8b9a138302984c6c21481 | 37f1563cdacf4b37b5b927b892538218aae79c77 | /medium/trees/flatten.py | 3a5d24227b7f8b528e32e799246ebdb3f20589e1 | [] | no_license | unsortedtosorted/elgoog | 9dee49a20f981305910a8924d86e8f2a16fe14c2 | 5be9fab24c0c1fd9d5dc7a7bdaca105f1ca873ee | refs/heads/master | 2020-04-15T00:51:12.114249 | 2019-05-19T04:37:24 | 2019-05-19T04:37:24 | 164,254,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | """
114. Flatten Binary Tree to Linked List
Steps:
if leaf node, do nothing
if not leaf node:
flatten left subtree
flatten right subtree
connect right child to right most leaf of left child
make left child as right child
make left child None
RunTime : O(N^2)
Space : O(N)
"""
class Solution(object):
def flatten(self, root):
def convert(root):
if root:
if not root.left and not root.right:
return
#flatten left and right child
convert(root.left)
convert(root.right)
l = root.left
r = root.right
#make left child as new right child
root.right = l
root.left = None
temp = root
#get right most leaf of new right child
while temp.right:
temp = temp.right
temp.right = r
convert(root)
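# Minimal local check of the solution above (a hedged sketch only; LeetCode normally
# supplies the TreeNode class). Expected output is [1, 2, 3, 4, 5, 6].
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(5)
    root.left.left, root.left.right = TreeNode(3), TreeNode(4)
    root.right.right = TreeNode(6)
    Solution().flatten(root)
    flat = []
    node = root
    while node:
        flat.append(node.val)
        node = node.right
    print(flat)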
| [
"[email protected]"
] | |
0cad74dfab81fccf5ba4454fd2435cc789439934 | 6b181f5640e2c3df91d1a6d5c95cf1989012f0d5 | /RPi-stub/spidev.py | c598627f4304ee6d4f63e4b17637f97d0978bb97 | [
"MIT"
] | permissive | GamesCreatorsClub/GCC-Rover | 9b84dcd84cce60c321906223f8c24f99722d1bae | 25a69f62a1bb01fc421924ec39f180f50d6a640b | refs/heads/master | 2021-01-11T18:04:05.876976 | 2019-10-01T15:20:30 | 2019-10-01T15:20:30 | 79,477,472 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,320 | py |
import os
import socket
_SPI_CPHA = 0x01
_SPI_CPOL = 0x02
# _SPI_MODE_0 = 0
# _SPI_MODE_1 = SPI_CPHA
# _SPI_MODE_2 = SPI_CPOL
# _SPI_MODE_3 = SPI_CPOL | SPI_CPHA
# _SPI_MODES = [_SPI_MODE_0, _SPI_MODE_1, _SPI_MODE_2, _SPI_MODE_3]
_SPI_CS_HIGH = 0x04
_SPI_LSB_FIRST = 0x08
_SPI_3WIRE = 0x10
_SPI_LOOP = 0x20
_SPI_NO_CS = 0x40
_SPI_READY = 0x80
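# Byte-level protocol used by this stub towards the helper server (see __main__ below):
#   'o' <bus> <device>               open the SPI device
#   'x' <len_lo> <len_hi> <payload>  transfer len bytes; the peer sends len bytes back
#   'c'                              close the device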
class SpiDev:
_socket = None
_bits_per_word = 0
# cshigh = False
# loop = None
# lsbfirst = False
_max_speed_hz = 0
_mode = 0
# threewire = False
def __init__(self):
port = 8789
ip = os.environ["RASPBERRY_IP"]
if "RASPBERRY_PORT" in os.environ:
port = int(os.environ["RASPBERRY_PORT"])
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((ip, port))
def __del__(self):
if self._socket is not None:
try:
self._socket.close()
except Exception as e:
pass
def open(self, bus, device):
b = bytearray()
b.append(ord("o"))
b.append(bus)
b.append(device)
self._socket.send(b)
def xfer(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
b = bytearray()
b.append(ord("x"))
b.append(len(data) & 255)
b.append(len(data) >> 8 & 255)
for d in data:
b.append(d)
self._socket.send(b)
rec = self._socket.recv(len(data))
resp = []
for bb in rec:
resp.append(bb)
return resp
def xfer2(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
pass
def close(self):
        self._mode = 0
        self._bits_per_word = 0
        self._max_speed_hz = 0
b = bytearray()
b.append(ord("c"))
self._socket.send(b)
def readbytes(self, n):
pass
def writebytes(self, data):
pass
@property
def cshigh(self):
return self._mode & _SPI_CS_HIGH != 0
@cshigh.setter
def cshigh(self, cshigh):
if cshigh:
self._mode = self._mode | _SPI_CS_HIGH
else:
self._mode = self._mode & ~_SPI_CS_HIGH
@property
def lsbfirst(self):
return self._mode & _SPI_LSB_FIRST != 0
    @lsbfirst.setter
def lsbfirst(self, lsbfirst):
if lsbfirst:
self._mode = self._mode | _SPI_LSB_FIRST
else:
self._mode = self._mode & ~_SPI_LSB_FIRST
@property
def threewire(self):
return self._mode & _SPI_3WIRE != 0
@threewire.setter
def threewire(self, threewire):
if threewire:
self._mode = self._mode | _SPI_3WIRE
else:
self._mode = self._mode & ~_SPI_3WIRE
@property
def loop(self):
        return self._mode & _SPI_LOOP != 0
@loop.setter
def loop(self, loop):
if loop:
self._mode = self._mode | _SPI_LOOP
else:
self._mode = self._mode & ~_SPI_LOOP
@property
def bits_per_word(self):
return self._bits_per_word
@bits_per_word.setter
def bits_per_word(self, bits_per_word):
if bits_per_word < 8 or bits_per_word > 16:
raise ValueError("invalid bits_per_word (8 to 16)")
self._bits_per_word = bits_per_word
    @property
    def max_speed_hz(self):
        return self._max_speed_hz
    @max_speed_hz.setter
    def max_speed_hz(self, max_speed_hz):
        self._max_speed_hz = max_speed_hz
@property
def mode(self):
return self._mode & (_SPI_CPHA | _SPI_CPOL)
@mode.setter
    def mode(self, mode):
self._mode = (self._mode & ~(_SPI_CPHA | _SPI_CPOL)) | mode
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", 8789))
s.listen(1)
def startListen():
import threading
def session(con):
while True:
# print("Waiting to command")
cmd = ord(con.recv(1))
if cmd == ord("c"):
print("Close")
elif cmd == ord("o"):
bus = ord(con.recv(1))
device = ord(con.recv(1))
print("Opening " + str(bus) + "." + str(device))
elif cmd == ord("x"):
l = ord(con.recv(1))
h = ord(con.recv(1))
                    size = l + (h << 8)
                    print("Receiving " + str(size) + " bytes")
data = con.recv(size)
print("Received " + str(data))
con.send(data)
else:
print("Unknown command " + str(cmd))
def listen():
while True:
con, addr = s.accept()
t = threading.Thread(target=session, args=[con])
t.daemon = True
t.start()
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
try:
startListen()
os.environ["RASPBERRY_IP"] = "127.0.0.1"
spi = SpiDev()
print("opening spi")
spi.open(1, 2)
print("sending data")
spi.xfer(b"Hello")
print("closing")
spi.close()
finally:
s.close()
s.detach() | [
"[email protected]"
] | |
a2595b2efb0dc2cdd2387f296ae8a5b72d28c811 | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_03_01/aio/operations/_operations.py | 9652fce008b36636921de47c082edd72b0107162 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 5,460 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_03_01.aio.ContainerServiceClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationValue"]:
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationValue or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_03_01.models.OperationValue]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"} # type: ignore
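# Hypothetical usage sketch (assumes an authenticated async ContainerServiceClient `client`):
#
#     async for operation in client.operations.list():
#         print(operation.name)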
| [
"[email protected]"
] | |
b5acde1079fe57a98f775f9003f4e8c68c326e4e | 41bea39563c74621924d79723f8ba84889958365 | /nkamg_pcap/server/antimal/misc/trails/feeds/bambenekconsultingdga.py | 698420381e69cf7f7c349a1e65d60c536efd4101 | [
"MIT"
] | permissive | NKQiuKF/pcap_update | abee0c13cb583fddb89eb9e86a487279bdc18f1d | 679e3f116367394a5f58eb4f95b5318e80fee331 | refs/heads/master | 2022-10-21T17:49:30.706480 | 2019-09-02T09:22:06 | 2019-09-02T09:22:06 | 205,816,421 | 1 | 0 | null | 2022-10-06T18:33:32 | 2019-09-02T08:55:55 | JavaScript | UTF-8 | Python | false | false | 647 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://osint.bambenekconsulting.com/feeds/dga-feed.txt"
__check__ = "Domain used by"
__reference__ = "bambenekconsulting.com"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for match in re.finditer(r"(?m)^([^,\s]+),Domain used by ([^ ]+)", content):
retval[match.group(1)] = ("%s dga (malware)" % match.group(2).lower(), __reference__)
return retval
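if __name__ == "__main__":
    # Hypothetical smoke test, not part of the original trails feed module; it needs
    # network access and the project's core.common helpers importable on the path.
    for domain, (description, reference) in list(fetch().items())[:5]:
        print("%s - %s (%s)" % (domain, description, reference))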
| [
"[email protected]"
] | |
6fb7d3bd1ca93b6147bb8eb30c92161cb11b930b | d867398bd54ef772a624a72e283a6b2bb546f693 | /signbank/docker_wsgi.py | 40135228152c02660b293ec2c07228e7b2188eec | [
"BSD-3-Clause"
] | permissive | stevecassidy/signbank-modular | b5d6aa3fd05b0aeeed9c7bb4688be6d0e5cb7b16 | f00e30814c8a08fe00eb28df231791f6fc18ce7f | refs/heads/master | 2023-01-13T09:32:39.503182 | 2022-01-12T04:18:59 | 2022-01-12T04:18:59 | 79,797,386 | 1 | 1 | BSD-3-Clause | 2022-12-26T20:58:06 | 2017-01-23T11:13:45 | Python | UTF-8 | Python | false | false | 1,333 | py | """
WSGI config for signbank project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Determine if there are live settings (not committed to source control) and load those if they exist instead of the default settings
code_path = os.path.dirname(os.path.realpath(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "signbank.settings.docker")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"[email protected]"
] | |
0751f1fdaa3b2d55769f3fc59c2d654fde132400 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/331/usersdata/302/94330/submittedfiles/funcoes1.py | 46414bcab930f2ecf75035fde4caf630153d831d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | # -*- coding: utf-8 -*-
'''
def crescente (lista):
if lista == sorted(lista):
return True
else:
return False
def decrescente (lista):
if lista == sorted(lista, reverse = True):
return True
else:
return False
def consectivos (lista,n):
for i in range(0,n,1):
if i < n:
            if lista[i-1] != lista[i]:
return False
continue
else:
return True
'''
# write the code for the crescente function here
# write the remaining functions
# write the main program
n = int(input('Enter the number of elements in the lists: '))
a = []
b = []
c = []
for i in range (0,n,1):
    a.append(int(input('Enter a%d: ' % (i+1))))
for i in range(1, n):
    if a[i-1] == a[i]:
        print('S')
        break
else:
    print('N')
'''
print(a)
for i in range (0,n,1):
    b.append(int(input('Enter b%d: ' % (i+1))))
print(b)
for i in range (0,n,1):
    c.append(int(input('Enter c%d: ' % (i+1))))
print(c)
'''
| [
"[email protected]"
] | |
493abda9b939ba922dde6ee476d341dceaf83a2f | 89a90707983bdd1ae253f7c59cd4b7543c9eda7e | /effective_python/item_52/recursive_import_bad/dialog.py | 8138738a13171344c2d6677a8de506fafcda19cb | [] | no_license | timothyshull/python_reference_code | 692a7c29608cadfd46a6cc409a000023e95b9458 | f3e2205dd070fd3210316f5f470d371950945028 | refs/heads/master | 2021-01-22T20:44:07.018811 | 2017-03-17T19:17:22 | 2017-03-17T19:17:22 | 85,346,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #!/usr/bin/env python3
# Copyright 2014 Brett Slatkin, Pearson Education Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Preamble to mimic book environment
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Example 2
import app
class Dialog(object):
def __init__(self, save_dir):
self.save_dir = save_dir
save_dialog = Dialog(app.prefs.get('save_dir'))
def show():
print('Showing the dialog!')
| [
"[email protected]"
] | |
ee48ec2d8e4f8e0d3b65128e0d92c29aa39b01c6 | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv5/2018/Mix/MassPoints/samples_2018limit_M800.py | 05c27f3ed419caa0a34d0b08bea9be09fa084db5 | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 19,238 | py |
#-----Variable Deinition-----#
supercut = 'nLepton>0'
eleWP='mvaFall17V1Iso_WP90'
muWP='cut_Tight_HWWW'
LepWPCut='(Lepton_isTightElectron_'+eleWP+'[0]>0.5 || Lepton_isTightMuon_'+muWP+'[0]>0.5)'
#------End of Variable Definition-----#
import os
import glob
import copy
import subprocess
import string
from LatinoAnalysis.Tools.commonTools import *
samples={}
SITE=os.uname()[1]
xrootdPath=''
if 'iihe' in SITE :
xrootdPath = 'dcap://maite.iihe.ac.be/'
treeBaseDir = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015/'
elif 'cern' in SITE :
treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/'
elif 'sdfarm' in SITE:
xrootdPath = 'root://cms-xrdr.private.lo:2094'
treeBaseDir = "/xrootd/store/user/jhchoi/Latino/HWWNano/"
CAMPAIGN='Autumn18_102X_nAODv5_Full2018v5'
STEP="MCl1loose2018v5__MCCorr2018v5__Semilep2018_whad30__CorrFatJetMass__HMlnjjSelBWR"
CAMPAIGN_DATA='Run2018_102X_nAODv5_Full2018v5'
STEP_DATA="DATAl1loose2018v5__Semilep2018_whad30__HMlnjjSel"
directory=treeBaseDir+CAMPAIGN+'/'+STEP
LepWPCut='(Lepton_isTightElectron_'+eleWP+'[0]>0.5 || Lepton_isTightMuon_'+muWP+'[0]>0.5)'
LepWPweight='(((Lepton_isTightElectron_'+eleWP+'[0]>0.5)*(Lepton_tightElectron_'+eleWP+'_IdIsoSF'+'[0]'+')) + ((Lepton_isTightMuon_'+muWP+'[0]>0.5)*(Lepton_tightMuon_'+muWP+'_IdIsoSF'+'[0]'+')))'
XSWeight = 'XSWeight'
#SFweight = 'SFweight'+Nlep+'l*'+LepWPweight+'*'+LepWPCut
#SFweight = 'puWeight*\
#TriggerEffWeight_1l*\
#Lepton_RecoSF[0]*\
#EMTFbug_veto'
SFweight = 'puWeight*\
TriggerEffWeight_1l*\
Lepton_RecoSF[0]*\
EMTFbug_veto*\
PUJetIdSF*\
tau21SF\
'
SFweight=SFweight+'*'+LepWPweight+'*'+LepWPCut
#GenLepMatch = 'GenLepMatch'+Nlep+'l'
GenLepMatch = 'Lepton_genmatched[0]'
################################################
############### B-Tag WP ######################
################################################
SFweight=SFweight+'*'+'btagSF'
################################################
############### B-Tag WP ######################
################################################
#pfCombinedInclusiveSecondaryVertexV2BJetTags (CSV) algorithm [26] loose working point.
################################################
############ MET FILTERS ###################
################################################
METFilter_MC = 'METFilter_MC'
METFilter_DATA = 'METFilter_DATA'
################################################
############ DATA DECLARATION ##################
################################################
DataRun = [
['A','Run2018A-Nano1June2019-v1'] ,
['B','Run2018B-Nano1June2019-v1'] ,
['C','Run2018C-Nano1June2019-v1'] ,
['D','Run2018D-Nano1June2019_ver2-v1'] ,
]
DataSets = ['SingleMuon',\
'EGamma'
]
DataTrig = {
'SingleMuon' : 'Trigger_sngMu' ,
'EGamma' : 'Trigger_sngEl && !Trigger_sngMu' ,
}
###########################################
############### SIGNAL ####################
###########################################
'''
samples['ggHWWlnuqq_M800'] = { 'name' : getSampleFiles(directory,'GluGluHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
#'weight' : XSWeight,
'FilesPerJob' : 5,
}
'''
samples['ggHWWlnuqq_M800_S_B_I'] = { 'name' : getSampleFiles(directory,'GluGluHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*(MSSModel+MSSModel_I+MSSModel_B+MSSModel_H+MSSModel_I_HB)',
'FilesPerJob' : 50,
}
samples['ggHWWlnuqq_M800_S'] = { 'name' : getSampleFiles(directory,'GluGluHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*MSSModel',
'FilesPerJob' : 50,
}
samples['ggWW_MELA'] = { 'name' : getSampleFiles(directory,'GluGluHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*(MSSModel_B+MSSModel_H+MSSModel_I_HB)',
'FilesPerJob' : 50,
}
samples['VBFHToWWToLNuQQ_M800_S_B_I'] = { 'name' : getSampleFiles(directory,'VBFHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*(MSSModel+MSSModel_I+MSSModel_B+MSSModel_H+MSSModel_I_HB)',
'FilesPerJob' : 50,
}
samples['VBFHToWWToLNuQQ_M800_S'] = { 'name' : getSampleFiles(directory,'VBFHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*MSSModel',
'FilesPerJob' : 50,
}
samples['qqWW_MELA'] = { 'name' : getSampleFiles(directory,'VBFHToWWToLNuQQ_M800',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*(MSSModel_B+MSSModel_H+MSSModel_I_HB)',
'FilesPerJob' : 50,
}
###########################################
############# BACKGROUNDS ###############
###########################################
samples['Wjets'] = { 'name' : getSampleFiles(directory,'WJetsToLNu-0J',False,'nanoLatino_')
+getSampleFiles(directory,'WJetsToLNu-1J',False,'nanoLatino_')
+getSampleFiles(directory,'WJetsToLNu-2J',False,'nanoLatino_')
,
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
'FilesPerJob' : 20,
}
############ DY ############
ptllDYW_NLO = '((0.623108 + 0.0722934*gen_ptll - 0.00364918*gen_ptll*gen_ptll + 6.97227e-05*gen_ptll*gen_ptll*gen_ptll - 4.52903e-07*gen_ptll*gen_ptll*gen_ptll*gen_ptll)*(gen_ptll<45)*(gen_ptll>0) + 1*(gen_ptll>=45))'
ptllDYW_LO = '((0.632927+0.0456956*gen_ptll-0.00154485*gen_ptll*gen_ptll+2.64397e-05*gen_ptll*gen_ptll*gen_ptll-2.19374e-07*gen_ptll*gen_ptll*gen_ptll*gen_ptll+6.99751e-10*gen_ptll*gen_ptll*gen_ptll*gen_ptll*gen_ptll)*(gen_ptll>0)*(gen_ptll<100)+(1.41713-0.00165342*gen_ptll)*(gen_ptll>=100)*(gen_ptll<300)+1*(gen_ptll>=300))'
samples['DY'] = { 'name' : #getSampleFiles(directory,'DYJetsToLL_M-50',False,'nanoLatino_')
getSampleFiles(directory,'DYJetsToLL_M-50-LO',False,'nanoLatino_')
+ getSampleFiles(directory,'DYJetsToLL_M-10to50-LO',False,'nanoLatino_'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
'FilesPerJob' : 20,
}
#addSampleWeight(samples,'DY','DYJetsToLL_M-50',ptllDYW_NLO)
addSampleWeight(samples,'DY','DYJetsToLL_M-50-LO',ptllDYW_LO)
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO',ptllDYW_LO)
samples['top'] = { 'name' : getSampleFiles(directory,'TTToSemiLeptonic',False,'nanoLatino_')
+ getSampleFiles(directory,'ST_t-channel_top',False,'nanoLatino_')
+ getSampleFiles(directory,'ST_t-channel_antitop',False,'nanoLatino_')
+ getSampleFiles(directory,'ST_s-channel_ext1',False,'nanoLatino_')
+ getSampleFiles(directory,'ST_tW_antitop_ext1',False,'nanoLatino_')
+ getSampleFiles(directory,'ST_tW_top_ext1',False,'nanoLatino_')
,
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
'FilesPerJob' : 5,
}
#samples['VV'] = { 'name' : getSampleFiles(directory,'WZ',False,'nanoLatino_')
# + getSampleFiles(directory,'ZZ',False,'nanoLatino_')
# ,
# 'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
# 'FilesPerJob' : 5,
# }
samples['QCD_MU'] = { 'name' : getSampleFiles(directory,'QCD_Pt-15to20_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-20to30_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-30to50_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-50to80_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-80to120_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-80to120_MuEnrichedPt5_ext1',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-120to170_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-120to170_MuEnrichedPt5_ext1',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-170to300_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-300to470_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-300to470_MuEnrichedPt5_ext3',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-470to600_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-470to600_MuEnrichedPt5_ext1',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-600to800_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-800to1000_MuEnrichedPt5',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-1000toInf_MuEnrichedPt5',False,'nanoLatino_')
,
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC,
'FilesPerJob' : 20,
}
samples['QCD_EM'] = { 'name' : getSampleFiles(directory,'QCD_Pt-15to20_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-20to30_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-30to50_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-50to80_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-80to120_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-120to170_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-170to300_EMEnriched',False,'nanoLatino_')
+getSampleFiles(directory,'QCD_Pt-300toInf_EMEnriched',False,'nanoLatino_')
,
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC,
'FilesPerJob' : 20,
}
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-15to20_MuEnrichedPt5', '0.0022')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-20to30_MuEnrichedPt5', '0.0045')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-30to50_MuEnrichedPt5', '0.00974')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-50to80_MuEnrichedPt5', '0.0196')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-80to120_MuEnrichedPt5', '0.0322')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-80to120_MuEnrichedPt5_ext1', '0.0322')
###EXT
w1=str(getEventSumw(directory,'QCD_Pt-80to120_MuEnrichedPt5','nanoLatino_'))
w2=str(getEventSumw(directory,'QCD_Pt-80to120_MuEnrichedPt5_ext1','nanoLatino_'))
totalw=str(float(w1)+float(w2))
######
addSampleWeight(samples,'QCD_MU','QCD_Pt-80to120_MuEnrichedPt5',w1+"/"+totalw)
addSampleWeight(samples,'QCD_MU','QCD_Pt-80to120_MuEnrichedPt5_ext1',w2+"/"+totalw)
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-120to170_MuEnrichedPt5', '0.04518')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-120to170_MuEnrichedPt5_ext1', '0.04518')
###EXT
w1=str(getEventSumw(directory,'QCD_Pt-120to170_MuEnrichedPt5','nanoLatino_'))
w2=str(getEventSumw(directory,'QCD_Pt-120to170_MuEnrichedPt5_ext1','nanoLatino_'))
totalw=str(float(w1)+float(w2))
addSampleWeight(samples,'QCD_MU','QCD_Pt-120to170_MuEnrichedPt5',w1+"/"+totalw)
addSampleWeight(samples,'QCD_MU','QCD_Pt-120to170_MuEnrichedPt5_ext1',w2+"/"+totalw)
######
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-170to300_MuEnrichedPt5', '0.0598')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-300to470_MuEnrichedPt5', '0.10196')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-300to470_MuEnrichedPt5_ext3', '0.10196')
###EXT
w1=str(getEventSumw(directory,'QCD_Pt-300to470_MuEnrichedPt5','nanoLatino_'))
w2=str(getEventSumw(directory,'QCD_Pt-300to470_MuEnrichedPt5_ext3','nanoLatino_'))
totalw=str(float(w1)+float(w2))
addSampleWeight(samples,'QCD_MU','QCD_Pt-300to470_MuEnrichedPt5',w1+"/"+totalw)
addSampleWeight(samples,'QCD_MU','QCD_Pt-300to470_MuEnrichedPt5_ext3',w2+"/"+totalw)
###
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-470to600_MuEnrichedPt5', '0.08722')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-470to600_MuEnrichedPt5_ext1', '0.08722')
###EXT
w1=str(getEventSumw(directory,'QCD_Pt-470to600_MuEnrichedPt5','nanoLatino_'))
w2=str(getEventSumw(directory,'QCD_Pt-470to600_MuEnrichedPt5_ext1','nanoLatino_'))
totalw=str(float(w1)+float(w2))
addSampleWeight(samples,'QCD_MU','QCD_Pt-470to600_MuEnrichedPt5',w1+"/"+totalw)
addSampleWeight(samples,'QCD_MU','QCD_Pt-470to600_MuEnrichedPt5_ext1',w2+"/"+totalw)
###
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-600to800_MuEnrichedPt5', '0.13412')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-800to1000_MuEnrichedPt5', '0.14552')
addSampleWeight(samples, 'QCD_MU', 'QCD_Pt-1000toInf_MuEnrichedPt5', '0.15544')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-15to20_EMEnriched', '0.0096*0.1101')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-15to20_EMEnriched_ext1', '0.0096*0.1101')
###EXT
#w1=str(getEventSumw(directory,'QCD_Pt-15to20_EMEnriched','nanoLatino_'))
#w2=str(getEventSumw(directory,'QCD_Pt-15to20_EMEnriched_ext1','nanoLatino_'))
#totalw=str(float(w1)+float(w2))
#addSampleWeight(samples,'QCD_EM','QCD_Pt-15to20_EMEnriched',w1+"/"+totalw)
#addSampleWeight(samples,'QCD_EM','QCD_Pt-15to20_EMEnriched_ext1',w2+"/"+totalw)
###
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-20to30_EMEnriched', '0.008875251076')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-30to50_EMEnriched', '0.0470')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-50to80_EMEnriched', '0.100')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-50to80_EMEnriched_ext1', '0.100')
##EXT
#w1=str(getEventSumw(directory,'QCD_Pt-50to80_EMEnriched','nanoLatino_'))
#w2=str(getEventSumw(directory,'QCD_Pt-50to80_EMEnriched_ext1','nanoLatino_'))
#totalw=str(float(w1)+float(w2))
#addSampleWeight(samples,'QCD_EM','QCD_Pt-50to80_EMEnriched',w1+"/"+totalw)
#addSampleWeight(samples,'QCD_EM','QCD_Pt-50to80_EMEnriched_ext1',w2+"/"+totalw)
###
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-80to120_EMEnriched', '0.1359064286')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-120to170_EMEnriched', '0.1396945073')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-170to300_EMEnriched', '0.1829736842')
addSampleWeight(samples, 'QCD_EM', 'QCD_Pt-300toInf_EMEnriched', '0.15')
samples['QCD_bcToE'] = { 'name' : #getSampleFiles(directory,'QCD_Pt_20to30_bcToE',False,'nanoLatino_')
getSampleFiles(directory,'QCD_Pt_30to80_bcToE',False,'nanoLatino_')
#+getSampleFiles(directory,'QCD_Pt_80to170_bcToE',False,'nanoLatino_')
#+getSampleFiles(directory,'QCD_Pt_170to250_bcToE',False,'nanoLatino_')
#+getSampleFiles(directory,'QCD_Pt_250toInf_bcToE',False,'nanoLatino_')
,
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC,
'FilesPerJob' : 20,
}
#samples['WW'] = { 'name' : getSampleFiles(directory,'WW-LO',False,'nanoLatino_')
# ,
# 'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC,
# 'FilesPerJob' : 5,
# }
#samples['WWToLNuQQ'] = { 'name' : getSampleFiles(directory,'WWToLNuQQ',False,'nanoLatino_') ,
# 'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC+'*'+LepWPweight ,
# }
#def getEventSumw(directory,sample,prefix):
#Wjets_w1=str(getEventSumw(directory,'WJetsToLNu','nanoLatino_'))
#Wjets_w2=str(getEventSumw(directory,'WJetsToLNu_ext2','nanoLatino_'))
#Wjets_totalw=str(float(Wjets_w1)+float(Wjets_w2))
#print "Wjets_w1="+Wjets_w1
#print "Wjets_w2="+Wjets_w2
#print "Wjets_totalw="+Wjets_totalw
###########################################
################## DATA ###################
###########################################
samples['DATA'] = { 'name': [ ] ,
'weight' : METFilter_DATA+'*'+LepWPCut ,
'weights' : [ ],
'isData': ['all'],
'FilesPerJob' : 20,
}
#print samples['DATA']
for Run in DataRun :
directory = treeBaseDir+CAMPAIGN_DATA+'/'+STEP_DATA
for DataSet in DataSets :
FileTarget = getSampleFiles(directory,DataSet+'_'+Run[1],True,'nanoLatino_')
for iFile in FileTarget:
#print(iFile)
samples['DATA']['name'].append(iFile)
samples['DATA']['weights'].append(DataTrig[DataSet])
| [
"[email protected]"
] | |
b1bfe0cbfab8073e75cb7be278c8efa2c751956e | 169edd2e971f78b261c78eb6e927efce7499237a | /2017_01_21_297/serializeAndDeserializeBT.py | dbaa31e137ab7e81e2b1f075f533908f61ec44c2 | [] | no_license | wk633/crack_leetcode | ce75fc92160a5e618f9cd84f5a6ab4871021f17b | e8559773069447f9e986712c45f6a5a53eaeb123 | refs/heads/master | 2021-01-13T07:38:50.957328 | 2017-05-12T01:51:21 | 2017-05-12T01:51:21 | 78,284,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
def preorder(node):
if node:
vals.append(str(node.val))
preorder(node.left)
preorder(node.right)
else:
vals.append("#")
vals = []
preorder(root)
return " ".join(vals)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
def dePreorder():
val = next(vals)
if val == "#":
return None
root = TreeNode(val)
root.left = dePreorder()
root.right = dePreorder()
return root
vals = iter(data.split(" "))
return dePreorder()
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
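# Hypothetical round-trip check (assumes the TreeNode class described above; note that
# deserialized node values come back as strings, since serialize stores str(node.val)):
#
#     root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#     codec = Codec()
#     data = codec.serialize(root)          # "1 2 # # 3 # #"
#     assert codec.serialize(codec.deserialize(data)) == data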
| [
"[email protected]"
] | |
a26332d09362043d07118787666ef90c13046967 | b63142e8540cb30bb0c663332e29a4112721073e | /1112_set_mismatch.py | b9a4afe54d4b78dccd9dfe2c00c7e86f2356ed95 | [] | no_license | HaydenInEdinburgh/LintCode | 025bb2f0d75686097061de324c0fd292536dbb14 | dbeae2bf631e57667d1415164d452d5ca2df7447 | refs/heads/master | 2023-08-18T19:52:54.561623 | 2021-10-06T21:46:50 | 2021-10-06T21:46:50 | 370,733,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | class Solution:
"""
@param nums: an array
@return: the number occurs twice and the number that is missing
"""
def findErrorNums(self, nums):
# Write your code here
if not nums:
return
cnt = {}
for n in nums:
            cnt[n] = cnt.get(n, 0) + 1
lost, dup = None, None
for i in range(1, len(nums)+1):
if i not in cnt:
lost = i
continue
if cnt[i] > 1:
dup = i
return [dup, lost]
if __name__ == '__main__':
s = Solution()
nums = [1, 1]
print(s.findErrorNums(nums)) | [
"[email protected]"
] | |
989a78a26e8477e2179a5074d77b7c51c2f41e38 | b3a2beaab1ac676c96e93a48d4f35ff6ed6799d0 | /anyex/exmo.py | 04a6da12b6988532f22314b2b2c28247dceda6cd | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ttwishing/anyex | 40c06cf34e4d8f96bb968e8b7be3d2da5e6023f8 | cfd1f2f04ab992b790add4843aafff91e5773cbf | refs/heads/master | 2020-05-23T12:07:58.615432 | 2019-05-15T05:09:46 | 2019-05-15T05:09:46 | 186,751,745 | 0 | 0 | MIT | 2019-05-15T04:57:08 | 2019-05-15T04:57:08 | null | UTF-8 | Python | false | false | 24,105 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import json
from anyex.base.errors import ExchangeError
from anyex.base.errors import AuthenticationError
from anyex.base.errors import InsufficientFunds
from anyex.base.errors import InvalidOrder
from anyex.base.errors import OrderNotFound
from anyex.base.errors import ExchangeNotAvailable
from anyex.base.errors import InvalidNonce
class exmo (Exchange):
def describe(self):
return self.deep_extend(super(exmo, self).describe(), {
'id': 'exmo',
'name': 'EXMO',
'countries': ['ES', 'RU'], # Spain, Russia
'rateLimit': 350, # once every 350 ms ≈ 180 requests per minute ≈ 3 requests per second
'version': 'v1',
'has': {
'CORS': False,
'fetchClosedOrders': 'emulated',
'fetchOpenOrders': True,
'fetchOrder': 'emulated',
'fetchOrders': 'emulated',
'fetchOrderTrades': True,
'fetchOrderBooks': True,
'fetchMyTrades': True,
'fetchTickers': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766491-1b0ea956-5eda-11e7-9225-40d67b481b8d.jpg',
'api': 'https://api.exmo.com',
'www': 'https://exmo.me',
'doc': [
'https://exmo.me/en/api_doc',
'https://github.com/exmo-dev/exmo_api_lib/tree/master/nodejs',
],
'fees': 'https://exmo.com/en/docs/fees',
},
'api': {
'public': {
'get': [
'currency',
'order_book',
'pair_settings',
'ticker',
'trades',
],
},
'private': {
'post': [
'user_info',
'order_create',
'order_cancel',
'user_open_orders',
'user_trades',
'user_cancelled_orders',
'order_trades',
'required_amount',
'deposit_address',
'withdraw_crypt',
'withdraw_get_txid',
'excode_create',
'excode_load',
'wallet_history',
],
},
},
'fees': {
'trading': {
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.001,
'LTC': 0.01,
'DOGE': 1,
'DASH': 0.01,
'ETH': 0.01,
'WAVES': 0.001,
'ZEC': 0.001,
'USDT': 25,
'XMR': 0.05,
'XRP': 0.02,
'KICK': 350,
'ETC': 0.01,
'BCH': 0.001,
},
'deposit': {
'USDT': 15,
'KICK': 50,
},
},
},
'exceptions': {
'40005': AuthenticationError, # Authorization error, incorrect signature
'40009': InvalidNonce, #
'40015': ExchangeError, # API function do not exist
'40016': ExchangeNotAvailable, # Maintenance work in progress
'40017': AuthenticationError, # Wrong API Key
'50052': InsufficientFunds,
'50054': InsufficientFunds,
'50304': OrderNotFound, # "Order was not found '123456789'"(fetching order trades for an order that does not have trades yet)
'50173': OrderNotFound, # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
'50319': InvalidOrder, # Price by order is less than permissible minimum for self pair
'50321': InvalidOrder, # Price by order is more than permissible maximum for self pair
},
})
def fetch_markets(self):
markets = self.publicGetPairSettings()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
id = keys[p]
market = markets[id]
symbol = id.replace('_', '/')
base, quote = symbol.split('/')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': True,
'limits': {
'amount': {
'min': self.safe_float(market, 'min_quantity'),
'max': self.safe_float(market, 'max_quantity'),
},
'price': {
'min': self.safe_float(market, 'min_price'),
'max': self.safe_float(market, 'max_price'),
},
'cost': {
'min': self.safe_float(market, 'min_amount'),
'max': self.safe_float(market, 'max_amount'),
},
},
'precision': {
'amount': 8,
'price': 8,
},
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserInfo()
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in response['balances']:
account['free'] = float(response['balances'][currency])
if currency in response['reserved']:
account['used'] = float(response['reserved'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = self.extend({
'pair': market['id'],
}, params)
if limit is not None:
request['limit'] = limit
response = self.publicGetOrderBook(request)
result = response[market['id']]
return self.parse_order_book(result, None, 'bid', 'ask')
def fetch_order_books(self, symbols=None, params={}):
self.load_markets()
ids = None
if not symbols:
ids = ','.join(self.ids)
# max URL length is 2083 symbols, including http schema, hostname, tld, etc...
if len(ids) > 2048:
numIds = len(self.ids)
raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
else:
ids = self.market_ids(symbols)
ids = ','.join(ids)
response = self.publicGetOrderBook(self.extend({
'pair': ids,
}, params))
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = self.find_symbol(id)
result[symbol] = self.parse_order_book(response[id], None, 'bid', 'ask')
return result
def parse_ticker(self, ticker, market=None):
timestamp = ticker['updated'] * 1000
symbol = None
if market:
symbol = market['symbol']
last = float(ticker['last_trade'])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['buy_price']),
'bidVolume': None,
'ask': float(ticker['sell_price']),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': float(ticker['avg']),
'baseVolume': float(ticker['vol']),
'quoteVolume': float(ticker['vol_curr']),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(params)
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
response = self.publicGetTicker(params)
market = self.market(symbol)
return self.parse_ticker(response[market['id']], market)
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': str(trade['trade_id']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': self.safe_string(trade, 'order_id'),
'type': None,
'side': trade['type'],
'price': float(trade['price']),
'amount': float(trade['quantity']),
'cost': self.safe_float(trade, 'amount'),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTrades(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(response[market['id']], market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privatePostUserTrades(self.extend(request, params))
if market is not None:
response = response[market['id']]
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
prefix = (type + '_') if (type == 'market') else ''
market = self.market(symbol)
if (type == 'market') and(price is None):
price = 0
request = {
'pair': market['id'],
'quantity': self.amount_to_string(symbol, amount),
'type': prefix + side,
'price': self.price_to_precision(symbol, price),
}
response = self.privatePostOrderCreate(self.extend(request, params))
id = self.safe_string(response, 'order_id')
timestamp = self.milliseconds()
amount = float(amount)
price = float(price)
status = 'open'
order = {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': price * amount,
'amount': amount,
'remaining': amount,
'filled': 0.0,
'fee': None,
'trades': None,
}
self.orders[id] = order
return self.extend({'info': response}, order)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrderCancel({'order_id': id})
if id in self.orders:
self.orders[id]['status'] = 'canceled'
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
try:
response = self.privatePostOrderTrades({
'order_id': str(id),
})
return self.parse_order(response)
except Exception as e:
if isinstance(e, OrderNotFound):
if id in self.orders:
return self.orders[id]
raise OrderNotFound(self.id + ' fetchOrder order id ' + str(id) + ' not found in cache.')
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
market = None
if symbol is not None:
market = self.market(symbol)
response = self.privatePostOrderTrades({
'order_id': str(id),
})
return self.parse_trades(response, market, since, limit)
def update_cached_orders(self, openOrders, symbol):
# update local cache with open orders
for j in range(0, len(openOrders)):
id = openOrders[j]['id']
self.orders[id] = openOrders[j]
openOrdersIndexedById = self.index_by(openOrders, 'id')
cachedOrderIds = list(self.orders.keys())
result = []
for k in range(0, len(cachedOrderIds)):
# match each cached order to an order in the open orders array
# possible reasons why a cached order may be missing in the open orders array:
# - order was closed or canceled -> update cache
# - symbol mismatch(e.g. cached BTC/USDT, fetched ETH/USDT) -> skip
id = cachedOrderIds[k]
order = self.orders[id]
result.append(order)
if not(id in list(openOrdersIndexedById.keys())):
# cached order is not in open orders array
# if we fetched orders by symbol and it doesn't match the cached order -> won't update the cached order
if symbol is not None and symbol != order['symbol']:
continue
# order is cached but not present in the list of open orders -> mark the cached order as closed
if order['status'] == 'open':
order = self.extend(order, {
'status': 'closed', # likewise it might have been canceled externally(unnoticed by "us")
'cost': None,
'filled': order['amount'],
'remaining': 0.0,
})
if order['cost'] is None:
if order['filled'] is not None:
order['cost'] = order['filled'] * order['price']
self.orders[id] = order
return result
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostUserOpenOrders(params)
marketIds = list(response.keys())
orders = []
for i in range(0, len(marketIds)):
marketId = marketIds[i]
market = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
parsedOrders = self.parse_orders(response[marketId], market)
orders = self.array_concat(orders, parsedOrders)
self.update_cached_orders(orders, symbol)
return self.filter_by_symbol_since_limit(self.orders, symbol, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(self.orders, 'status', 'open')
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(self.orders, 'status', 'closed')
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'order_id')
timestamp = self.safe_integer(order, 'created')
if timestamp is not None:
timestamp *= 1000
iso8601 = None
symbol = None
side = self.safe_string(order, 'type')
if market is None:
marketId = None
if 'pair' in order:
marketId = order['pair']
elif ('in_currency' in list(order.keys())) and('out_currency' in list(order.keys())):
if side == 'buy':
marketId = order['in_currency'] + '_' + order['out_currency']
else:
marketId = order['out_currency'] + '_' + order['in_currency']
if (marketId is not None) and(marketId in list(self.markets_by_id.keys())):
market = self.markets_by_id[marketId]
amount = self.safe_float(order, 'quantity')
if amount is None:
amountField = 'in_amount' if (side == 'buy') else 'out_amount'
amount = self.safe_float(order, amountField)
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'amount')
filled = 0.0
trades = []
transactions = self.safe_value(order, 'trades')
feeCost = None
if transactions is not None:
if isinstance(transactions, list):
for i in range(0, len(transactions)):
trade = self.parse_trade(transactions[i], market)
if id is None:
id = trade['order']
if timestamp is None:
timestamp = trade['timestamp']
if timestamp > trade['timestamp']:
timestamp = trade['timestamp']
filled += trade['amount']
if feeCost is None:
feeCost = 0.0
# feeCost += trade['fee']['cost']
if cost is None:
cost = 0.0
cost += trade['cost']
trades.append(trade)
if timestamp is not None:
iso8601 = self.iso8601(timestamp)
remaining = None
if amount is not None:
remaining = amount - filled
status = self.safe_string(order, 'status') # in case we need to redefine it for canceled orders
if filled >= amount:
status = 'closed'
else:
status = 'open'
if market is None:
market = self.get_market_from_trades(trades)
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
if cost is None:
if price is not None:
cost = price * filled
elif price is None:
if filled > 0:
price = cost / filled
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'id': id,
'datetime': iso8601,
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': fee,
'info': order,
}
def get_market_from_trades(self, trades):
tradesBySymbol = self.index_by(trades, 'pair')
symbols = list(tradesBySymbol.keys())
numSymbols = len(symbols)
if numSymbols == 1:
return self.markets[symbols[0]]
return None
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
def withdraw(self, currency, amount, address, tag=None, params={}):
self.load_markets()
request = {
'amount': amount,
'currency': currency,
'address': address,
}
if tag is not None:
request['invoice'] = tag
result = self.privatePostWithdrawCrypt(self.extend(request, params))
return {
'info': result,
'id': result['task_id'],
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({'nonce': nonce}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'result' in response:
#
# {"result":false,"error":"Error 50052: Insufficient funds"}
#
success = self.safe_value(response, 'result', False)
if isinstance(success, basestring):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = None
message = self.safe_string(response, 'error')
errorParts = message.split(':')
numParts = len(errorParts)
if numParts > 1:
errorSubParts = errorParts[0].split(' ')
numSubParts = len(errorSubParts)
code = errorSubParts[1] if (numSubParts > 1) else errorSubParts[0]
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
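# Hypothetical usage sketch (assumes anyex mirrors the ccxt calling convention and that
# network access is available; 'BTC/USD' corresponds to EXMO's BTC_USD pair):
#
#     import anyex
#     exchange = anyex.exmo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     print(exchange.fetch_ticker('BTC/USD'))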
| [
"[email protected]"
] | |
5969f9a3f58bcb9581bb2c8659c6b3ec621e71f5 | 995514f414eee6bbe9083ec39ecd3027cf9fd7d8 | /3.2/14_mandelbrotTime.py | 70eae2dba5c0725cca43753388faa48c6d600cd7 | [] | no_license | j16949/Programming-in-Python-princeton | e02376ebb714264a1329aacad30347e4ae79f909 | 392391b98815cc1ae2b49e1057a10bc5b37e801f | refs/heads/master | 2023-08-08T11:52:04.362780 | 2023-08-01T13:02:41 | 2023-08-01T13:02:41 | 313,943,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | #-----------------------------------------------------------------------
# mandelbrot.py
#-----------------------------------------------------------------------
import sys
import stddraw
from color import Color
from picture import Picture
import complex as com
from stopwatch import Stopwatch
#-----------------------------------------------------------------------
# Compute the Mandelbrot iteration sequence starting at z0, and
# return the number of iterations for which the magnitude stays less
# than 2, up to the limit.
def mandel(z0, limit):
z = z0
for t in range(limit):
if abs(z) > 2.0:
return t
z = z * z + z0
return limit
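# Worked example: z0 = 0 never escapes, so mandel(complex(0, 0), 255) returns the limit 255,
# while z0 = 1+1j exceeds magnitude 2 after a single update, so mandel(complex(1, 1), 255) == 1.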
#-----------------------------------------------------------------------
# Accept float command-line arguments xc, yc, and size that specify
# the center and size of a square region of interest. Make a digital
# image showing the result of sampling the Mandelbrot set in that
# region at a 512*512 grid of equally spaced pixels. Color each pixel
# with a grayscale value that is determined by counting the number of
# iterations before the Mandelbrot sequence for the corresponding
# complex number grows past 2.0, up to 255.
MAX = 255
#n = int(sys.argv[1])
#xc = float(sys.argv[2])
#yc = float(sys.argv[3])
#size = float(sys.argv[4])
n = 512
xc = -.5
yc = 0
size = 2
w1 = Stopwatch()
pic = Picture(n, n)
for col in range(n):
for row in range(n):
x0 = xc - (size / 2) + (size * col / n)
y0 = yc - (size / 2) + (size * row / n)
z0 = complex(x0, y0)
gray = MAX - mandel(z0, MAX)
color = Color(gray, gray, gray)
pic.set(col, n-1-row, color)
print(w1.elapsedTime())
w2 = Stopwatch()
pic = Picture(n, n)
for col in range(n):
for row in range(n):
x0 = xc - (size / 2) + (size * col / n)
y0 = yc - (size / 2) + (size * row / n)
z0 = com.Complex(x0, y0)
gray = MAX - mandel(z0, MAX)
color = Color(gray, gray, gray)
pic.set(col, n-1-row, color)
print(w2.elapsedTime())
#stddraw.setCanvasSize(n, n)
#stddraw.picture(pic)
#stddraw.show()
#-----------------------------------------------------------------------
#bai@ubuntu:~/pythonProject/princeton/3.2$ python3 14_mandelbrotTime.py
#pygame 1.9.6
#Hello from the pygame community. https://www.pygame.org/contribute.html
#5.372214317321777
#37.89339089393616
| [
"[email protected]"
] | |
8f90f0f74e299c60c30453d312fc3d1aba364719 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03013/s183344003.py | a8fdb4a198d1b4d4846bb4b99b7d9b53647648b2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
N, M = map(int, input().split())
X = [int(input()) for _ in range(M)]
MOD = 10 ** 9 + 7
dp = [-1] * (N + 1)
dp[0] = 1
for i in range(M):
dp[X[i]] = 0
for i in range(N):
if dp[i + 1] < 0:
if i == 0:
dp[i + 1] = dp[i]
else:
dp[i + 1] = (dp[i] + dp[i - 1]) % MOD
print(dp[-1])
| [
"[email protected]"
] | |
355aefac356edd3ca3cfe23b9b410776566b9d49 | ff66dfb302dfdc5a519787cea8ad0ccfc2264334 | /python/ex6_support_vector_machines/ex6_spam.py | 2a5930371e0659dc42529d963af53411ce0ba45b | [
"MIT"
] | permissive | ashu-vyas-github/AndrewNg_MachineLearning_Coursera | 1c2d50e6a44e8e673203bf06a3f0165cac0a240e | 1be5124b07df61f7295dd1c5151b86b061bf50fc | refs/heads/main | 2023-07-11T14:30:52.057125 | 2021-08-17T06:04:30 | 2021-08-17T06:04:30 | 388,360,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,523 | py | # Machine Learning Online Class
# Exercise 6 | Spam Classification with SVMs
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# gaussianKernel.m
# dataset3Params.m
# processEmail.m
# emailFeatures.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
import re # import regular expressions to process emails
import numpy
from scipy.io import loadmat
import svm_funcs
# ==================== Part 1: Email Preprocessing ====================
print("\nPreprocessing sample email (emailSample1.txt)\n")
# Extract Features
with open('./emailSample1.txt') as fid:
file_contents = fid.read()
word_indices = svm_funcs.process_email(file_contents, False)
# Print Stats
print('-------------')
print('Word Indices:')
print('-------------')
print(word_indices)
# ==================== Part 2: Feature Extraction ====================
print("\nExtracting features from sample email (emailSample1.txt)\n")
# Extract Features
features = svm_funcs.email_features(word_indices)
# Print Stats
print("Length of feature vector: %d" % len(features))
print("Number of non-zero entries: %d" % sum(features > 0))
# =========== Part 3: Train Linear SVM for Spam Classification ========
# Load the Spam Email dataset
# You will have X, y in your environment
data = loadmat("./spamTrain.mat")
x_train = data['X'].astype(float)
y_train = data['y']
y_train = y_train.reshape(-1)
num_examples, num_features = x_train.shape
print("Spam example Ex.6. training #examples:", num_examples, "#features:", num_features)
print("\nTraining Linear SVM (Spam Classification)")
print("This may take 1 to 2 minutes...\n")
reg_C = 0.1
model = svm_funcs.svm_train(svm_funcs.linear_kernel, x_train, y_train, reg_C, tol=1e-3, max_passes=20)
train_pred = svm_funcs.svm_predict(model, x_train) # Compute the training accuracy
train_acc = numpy.mean(train_pred == y_train)
print("Training Accuracy: %.2f" % (train_acc*100))
# =================== Part 4: Test Spam Classification ================
# Load the test dataset
# You will have Xtest, ytest in your environment
data = loadmat("./spamTest.mat")
x_test = data['Xtest'].astype(float)
y_test = data['ytest']
y_test = y_test.reshape(-1)
print("\nEvaluating the trained Linear SVM on a test set...")
test_pred = svm_funcs.svm_predict(model, x_test)
test_acc = numpy.mean(test_pred == y_test)
print("\nTest Accuracy: %.2f" % (test_acc*100))
# ================= Part 5: Top Predictors of Spam ====================
# Sort the weights and obtain the vocabulary list
# NOTE some words have the same weights, so their order might be different than in the text above
idx = numpy.argsort(model['w'])
top_idx = idx[-15:][::-1]
vocab_list = svm_funcs.get_vocab_list()
print("\nTop predictors of spam:")
print("%-15s %-15s" % ('word', 'weight'))
print("----" + " "*12 + "------")
for word, w in zip(numpy.array(vocab_list)[top_idx], model['w'][top_idx]):
print("%-15s %0.2f" % (word, w))
# # =================== Part 6: Try Your Own Emails =====================
filename = './emailSample1.txt'
with open(filename) as fid:
file_contents = fid.read()
word_indices = svm_funcs.process_email(file_contents, verbose=False)
x = svm_funcs.email_features(word_indices)
p = svm_funcs.svm_predict(model, x)
print("\nProcessed %s\nSpam Classification: %s" % (filename, 'spam' if p else 'not spam'))
| [
"[email protected]"
] | |
84406411b9aca252144887dbe784d8a128e5029b | 115ef7a9ffc88148b7439bd25ef3c97720be87e6 | /Backtester_v2.0/2.0/correlation.py | 421ae0d95b0fdcf95cf4f52eb0b2f350f8de07db | [
"MIT"
] | permissive | octicalpha/billions | 387bc0db600dd97915be0cece710237ff626b86c | 5465c527d614ae64789906197c1effe7ba94d373 | refs/heads/master | 2020-04-01T21:35:50.582694 | 2018-10-14T05:36:50 | 2018-10-14T05:36:50 | 153,664,919 | 0 | 3 | null | 2018-10-18T17:53:35 | 2018-10-18T17:53:34 | null | UTF-8 | Python | false | false | 3,929 | py | import pandas as pd
import numpy as np
import os
import re
from matplotlib import pyplot as plt
import argparse
class Corr(object):
def __init__(
self, pnl_dir_path, pnl_filepath, pos_dir_path, pos_filepath, range_
):
self.meta_info = {
"pnl": {
"dir_path": pnl_dir_path,
"filepath": pnl_filepath,
"range": 0
},
"pos": {
"dir_path": pos_dir_path,
"filepath": pos_filepath,
"range": range_
}
}
self.pnl_df, self.pnl_se = self.get_sequences("pnl")
self.pos_df, self.pos_se = self.get_sequences("pos")
self.result_dict = {
"PNL": self.get_corr_tuples(self.pnl_df, self.pnl_se),
"POS": self.get_corr_tuples(self.pos_df, self.pos_se)
}
def get_sequences(self, scope):
def get_filepaths():
p = re.compile(".*_"+scope+".csv")
all_names = os.listdir(dir_path)
filenames = sum([p.findall(s) for s in all_names], [])
return [os.path.join(dir_path, s) for s in filenames]
def get_se(filepath):
p = re.compile(".*/(.*)_"+scope+".csv")
df = pd.read_csv(filepath, index_col=0)[-range_:]
se = df["CumPnL"] if scope == "pnl" else df.mean()
se.name = p.findall(filepath)[0]
return se
meta = self.meta_info
dir_path = meta[scope]["dir_path"]
pivot_filepath = meta[scope]["filepath"]
range_ = meta[scope]["range"]
filepaths = get_filepaths()
pivot_se = get_se(pivot_filepath)
ses = [get_se(filepath) for filepath in filepaths]
df = pd.concat(ses, axis=1).drop([pivot_se.name], axis=1)
return df, pivot_se
def get_corr_tuples(self, sequence_df, pivot_se):
def calc_corr(name):
ses = [pivot_se, sequence_df[name]]
df = pd.concat(ses, axis=1).dropna()
corr_mat = np.corrcoef(df[pivot_se.name], df[name])
return corr_mat[1, 0]
corr_dict = {name: calc_corr(name) for name in sequence_df.columns}
names = sorted(sequence_df.columns, key=lambda name: corr_dict[name])
corr_tuples = [(name, corr_dict[name]) for name in names]
return corr_tuples
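    # The tuples above are sorted by ascending correlation against the pivot
    # series; an illustrative (made-up) result: [("alpha_b", -0.12), ("alpha_a", 0.87)].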
def display(self, scope):
def draw_hist():
plt.hist(map(lambda x: x[1], corr_tuples))
plt.show()
def _make_it_readable(t):
name = t[0]+" "*(max_name_length-len(t[0]))
            corr = str(t[1])
return name + " | " + corr
def print_report():
print(scope+" Max 5")
for t in corr_tuples[:-6:-1]:
print(_make_it_readable(t))
print(scope+" Min 5")
for t in corr_tuples[:5]:
print(_make_it_readable(t))
corr_tuples = self.result_dict[scope]
max_name_length = max(map(lambda x: len(x[0]), corr_tuples))
draw_hist()
print_report()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pnl_dir', type=str)
parser.add_argument('--pnlfile', type=str)
parser.add_argument('--pos_dir', type=str)
parser.add_argument('--posfile', type=str)
parser.add_argument('--range', type=int)
args = parser.parse_args()
PNL_DIR_PATH = args.pnl_dir
PNL_FILEPATH = args.pnlfile
POS_DIR_PATH = args.pos_dir
POS_FILEPATH = args.posfile
RANGE = args.range
corr = Corr(PNL_DIR_PATH, PNL_FILEPATH, POS_DIR_PATH, POS_FILEPATH, RANGE)
corr.display("PNL")
corr.display("POS") | [
"[email protected]"
] | |
062d988ee4c6b9c74fee068ea1501c5cbc67ff6f | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /2501-3000/2518.Number of Great Partitions.py | cb48ab1fc9ca60491f8fa5cb5332230e804326f0 | [] | no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 814 | py | from functools import cache
MOD = 10 ** 9 + 7
class Solution:
def countPartitions(self, nums: List[int], k: int) -> int:
prefix_sum = [0] * len(nums)
for i in range(len(nums)):
prefix_sum[i] = nums[i] + prefix_sum[i - 1]
if prefix_sum[-1] < k * 2:
return 0
@cache
def dp(i, need1, need2):
if i < 0 and max(need1, need2) > 0:
return 0
if max(need1, need2) > prefix_sum[i]:
return 0
if need1 == need2 == 0:
return pow(2, (i + 1), MOD)
if need1 > need2:
return dp(i, need2, need1)
return (dp(i - 1, max(0, need1 - nums[i]), need2) + dp(i - 1, need1, max(0, need2 - nums[i]))) % MOD
return dp(len(nums) - 1, k, k)
| [
"[email protected]"
] | |
9c05be7ad041f9de5e1d1bce2dd31350714c4619 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/386/usersdata/353/92377/submittedfiles/ep1.py | 10718f855550587ca2a4c168c199e31760a04b62 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | # -*- coding: utf-8 -*-
'''
/**********************************************************/
/* Team: Igor Emanuel Lucas Farias, Victória Cruz Gouveia */
/* Student ID numbers: 407553, 407582 */
/* Exercise-Program 1 -- Roots of Quadratic Equations */
/* ECI0007 or EM0006 (EC/EM) -- 2017 -- Professor: Rafael */
/* Interpreter: Python version 3 */
/**********************************************************
'''
# START YOUR CODE ON THE LINE BELOW.
def raiz2(x,epsilon):
rn=x
while True:
rm=(1/2)*(rn + (x/rn))
if abs (rm-rn)<epsilon:
return(rm)
rn=rm
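# raiz2 above is the Babylonian (Newton) iteration for square roots: for example,
# raiz2(9, 0.000001) converges to roughly 3.0 in a handful of iterations.
# It assumes x > 0 (x == 0 would divide by zero on the first step).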
def baskara(a,b,c):
delta=(b**2) - 4*a*c
if delta>=0:
x1=((-b)+(raiz2(delta,epsilon)))/(2*a)
x2=((-b)-(raiz2(delta,epsilon)))/(2*a)
if delta>0:
return('reais simples', '%2.0f'%x1, '%2.0f'%x2)
elif delta==0:
return('real dupla', '%2.0f'%x1, '%2.0f'%x2)
else:
delta=delta*(-1)
x3=((raiz2(delta,epsilon))/(2*a))
x1=((-b)/(2*a))
x2=((-b)/(2*a))
        # complex() rejects a second argument when the first is a string, so build
        # the conjugate pair from the numeric values directly.
        return('complexas', complex(round(x1), round(x3)), complex(round(x2), -round(x3)))
epsilon=float(input('Digite o epsilon de controle: '))
nequacoes=int(input('Digite o número de equações: '))
for equação in range(0,nequacoes,1):
a=float(input('Digite o a da equação: '))
b=float(input('Digite o b da equação: '))
c=float(input('Digite o c da equação: '))
if a!=0:
print('%.2f'%a), print('%.2f'%b), print('%.2f'%c), print(baskara(a,b,c))
else:
print('***ERRO: equação não é do segundo grau! ***') | [
"[email protected]"
] | |
8843b68fa4debf3686b1f6037b8afaea4ee71840 | b739fefa06d46a60fe053f7fe0fe2c62a52242b2 | /pages/admin/edit_news_page.py | 1417cc238ade39fddacc51eaa0b28d1870ddfc0f | [] | no_license | icorso/gkr-web-tests | c59d3b0f7e371e887c6699cd09b6a87a71dd762e | fdf25ad700d75230f1af74b646a6a8a18f3d0b18 | refs/heads/master | 2021-01-13T05:15:37.913759 | 2017-02-08T07:44:04 | 2017-02-08T07:44:04 | 81,296,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # coding=utf-8
from pages import page, Input, Checkbox, BasePage, BaseElement, By
from pages.gkr_page import GkrPage
@page(u"Форма редактирования новости", By.XPATH, "//form[..//child::*[contains(text(),'Редактирование новости')]]")
class EditNewsPage(GkrPage):
TITLE = BaseElement("Поле 'Заголовок новости'", By.ID, "title")
DESC = BaseElement("Поле 'Текст новости'", By.ID, "text")
PUBLISH_DATE = Input("Поле 'Дата создания'", By.XPATH, ".//*[child::*[contains(text(),'Дата')]]")
IS_PUBLISHED = Checkbox("Чекбокс 'Опубликовать'", By.XPATH, ".//input[@type='checkbox']")
SUBMIT = BaseElement("Кнопка 'Создать'", By.XPATH, ".//button[@type='submit']")
ERROR = BaseElement("Сообщение об ошибке", By.XPATH, ".//span[contains(@style,'ff0000')]")
| [
"[email protected]"
] | |
dad102cb3faea7b8d1d9c97f542f4ce326fe7d2f | 3c44ddbe867d953a5f27c8c073e1ea5e995b5873 | /experiments/experiment_1/debugger.py | 7a9f1bcd3dee99eca66c48d12ca62b8f83a43330 | [] | no_license | cair/deep-warehouse | 37f6a3510638b36c276abb62b6b770d0ba6186af | 93cb7329c28733083b48ab6afd3de91676852175 | refs/heads/master | 2022-03-10T16:45:59.553325 | 2022-02-20T17:28:19 | 2022-02-20T17:28:19 | 167,932,576 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import sys
from deep_logistics.scheduler import OnDemandScheduler
from deep_logistics.spawn_strategy import LocationSpawnStrategy
from experiments.experiment_3.state_representations import State0
sys.path.append("/home/per/GIT/deep-logistics")
sys.path.append("/home/per/IdeaProjects/deep_logistics")
sys.path.append("/home/per/GIT/code/deep_logistics")
sys.path.append("/root")
from deep_logistics.environment import Environment
from deep_logistics.agent import InputAgent
if __name__ == "__main__":
env = Environment(
height=5,
width=5,
depth=3,
ups=None,
ticks_per_second=1,
taxi_n=1,
taxi_agent=InputAgent,
taxi_respawn=False,
taxi_control="constant",
scheduler=OnDemandScheduler,
delivery_locations=None,
spawn_strategy=LocationSpawnStrategy,
graphics_render=True,
graphics_tile_height=64,
graphics_tile_width=64
)
env.deploy_agents()
env.task_assignment()
state = State0(env)
agent = env.agents[0]
def on_event():
env.update()
y = state.generate(agent)
print(" - ".join([str(x) for x in y]))
agent.add_event_callback(on_event)
while True:
agent.automate()
env.render()
| [
"[email protected]"
] | |
a71cc98489fc1998a280603110b534f157129380 | 1a1b7f607c5e0783fd1c98c8bcff6460e933f09a | /core/charge/charge_types.py | 6f4f6bbf937df689e1cbbdc057d1a0b96b383e91 | [] | no_license | smrmohammadi/freeIBS | 14fb736fcadfaea24f0acdafeafd2425de893a2d | 7f612a559141622d5042614a62a2580a72a9479b | refs/heads/master | 2021-01-17T21:05:19.200916 | 2014-03-17T03:07:15 | 2014-03-17T03:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | from core.charge.internet_charge import InternetCharge
from core.charge.voip_charge import VoipCharge
from core.charge.internet_charge_rule import InternetChargeRule
from core.charge.voip_charge_rule import VoipChargeRule
from core.lib.time_lib import *
from core.ibs_exceptions import *
def getChargeClassForType(_type):
if _type=="Internet":
return InternetCharge
elif _type=="VoIP":
return VoipCharge
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
def getRulesTable(_type):
"""
    return the name of the table in which rules for a charge object of the given _type are stored
    rule tables are different for each charge type
"""
if _type=="Internet":
return "internet_charge_rules"
elif _type=="VoIP":
return "voip_charge_rules"
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
def getChargeRuleObjForType(_type,rule_info,charge_obj,day_of_weeks,ports):
if _type=="Internet":
return InternetChargeRule(rule_info["charge_rule_id"],charge_obj,rule_info["cpm"],rule_info["cpk"],day_of_weeks,\
rule_info["start_time"],rule_info["end_time"],rule_info["bandwidth_limit_kbytes"],\
rule_info["bw_transmit_leaf_id"],rule_info["bw_receive_leaf_id"],rule_info["assumed_kps"],\
rule_info["ras_id"],ports)
elif _type=="VoIP":
return VoipChargeRule(rule_info["charge_rule_id"],charge_obj,\
day_of_weeks,rule_info["start_time"],rule_info["end_time"], \
rule_info["tariff_id"],rule_info["ras_id"],ports)
else:
raise IBSException(errorText("CHARGES","INVALID_CHARGE_TYPE")%_type)
| [
"farshad_kh"
] | farshad_kh |
6b8ead12bbffd5a38cf2d2bff5bce335c95ee51f | 1bd14e051251d08393731c03ccfb37a324227e1c | /troposphere_mate/elasticbeanstalk.py | 288cd4e0855ebe72cb5021cdb0fc981f760b6594 | [
"MIT"
] | permissive | tsuttsu305/troposphere_mate-project | f04bb6a3d137be3e265652c626008edfbb670b55 | 15ee94cc913efb32bc991979efcad943c992074c | refs/heads/master | 2023-06-07T15:07:47.041944 | 2021-07-05T02:02:00 | 2021-07-05T02:02:00 | 285,152,616 | 0 | 0 | MIT | 2020-08-05T02:08:01 | 2020-08-05T02:08:00 | null | UTF-8 | Python | false | false | 10,323 | py | # -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.elasticbeanstalk
from troposphere.elasticbeanstalk import (
ApplicationResourceLifecycleConfig as _ApplicationResourceLifecycleConfig,
ApplicationVersionLifecycleConfig as _ApplicationVersionLifecycleConfig,
MaxAgeRule as _MaxAgeRule,
MaxCountRule as _MaxCountRule,
OptionSettings as _OptionSettings,
SourceBundle as _SourceBundle,
SourceConfiguration as _SourceConfiguration,
Tags as _Tags,
Tier as _Tier,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class MaxAgeRule(troposphere.elasticbeanstalk.MaxAgeRule, Mixin):
def __init__(self,
title=None,
DeleteSourceFromS3=NOTHING, # type: bool
Enabled=NOTHING, # type: bool
MaxAgeInDays=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
DeleteSourceFromS3=DeleteSourceFromS3,
Enabled=Enabled,
MaxAgeInDays=MaxAgeInDays,
**kwargs
)
super(MaxAgeRule, self).__init__(**processed_kwargs)
class MaxCountRule(troposphere.elasticbeanstalk.MaxCountRule, Mixin):
def __init__(self,
title=None,
DeleteSourceFromS3=NOTHING, # type: bool
Enabled=NOTHING, # type: bool
MaxCount=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
DeleteSourceFromS3=DeleteSourceFromS3,
Enabled=Enabled,
MaxCount=MaxCount,
**kwargs
)
super(MaxCountRule, self).__init__(**processed_kwargs)
class ApplicationVersionLifecycleConfig(troposphere.elasticbeanstalk.ApplicationVersionLifecycleConfig, Mixin):
def __init__(self,
title=None,
MaxAgeRule=NOTHING, # type: _MaxAgeRule
MaxCountRule=NOTHING, # type: _MaxCountRule
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
MaxAgeRule=MaxAgeRule,
MaxCountRule=MaxCountRule,
**kwargs
)
super(ApplicationVersionLifecycleConfig, self).__init__(**processed_kwargs)
class SourceBundle(troposphere.elasticbeanstalk.SourceBundle, Mixin):
def __init__(self,
title=None,
S3Bucket=REQUIRED, # type: Union[str, AWSHelperFn]
S3Key=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
S3Bucket=S3Bucket,
S3Key=S3Key,
**kwargs
)
super(SourceBundle, self).__init__(**processed_kwargs)
class SourceConfiguration(troposphere.elasticbeanstalk.SourceConfiguration, Mixin):
def __init__(self,
title=None,
ApplicationName=REQUIRED, # type: Union[str, AWSHelperFn]
TemplateName=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ApplicationName=ApplicationName,
TemplateName=TemplateName,
**kwargs
)
super(SourceConfiguration, self).__init__(**processed_kwargs)
class ApplicationResourceLifecycleConfig(troposphere.elasticbeanstalk.ApplicationResourceLifecycleConfig, Mixin):
def __init__(self,
title=None,
ServiceRole=NOTHING, # type: Union[str, AWSHelperFn]
VersionLifecycleConfig=NOTHING, # type: _ApplicationVersionLifecycleConfig
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ServiceRole=ServiceRole,
VersionLifecycleConfig=VersionLifecycleConfig,
**kwargs
)
super(ApplicationResourceLifecycleConfig, self).__init__(**processed_kwargs)
class OptionSettings(troposphere.elasticbeanstalk.OptionSettings, Mixin):
def __init__(self,
title=None,
Namespace=REQUIRED, # type: Union[str, AWSHelperFn]
OptionName=REQUIRED, # type: Union[str, AWSHelperFn]
Value=REQUIRED, # type: Union[str, AWSHelperFn]
ResourceName=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Namespace=Namespace,
OptionName=OptionName,
Value=Value,
ResourceName=ResourceName,
**kwargs
)
super(OptionSettings, self).__init__(**processed_kwargs)
class Application(troposphere.elasticbeanstalk.Application, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ApplicationName=NOTHING, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
ResourceLifecycleConfig=NOTHING, # type: _ApplicationResourceLifecycleConfig
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ApplicationName=ApplicationName,
Description=Description,
ResourceLifecycleConfig=ResourceLifecycleConfig,
**kwargs
)
super(Application, self).__init__(**processed_kwargs)
class ApplicationVersion(troposphere.elasticbeanstalk.ApplicationVersion, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ApplicationName=REQUIRED, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
SourceBundle=NOTHING, # type: _SourceBundle
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ApplicationName=ApplicationName,
Description=Description,
SourceBundle=SourceBundle,
**kwargs
)
super(ApplicationVersion, self).__init__(**processed_kwargs)
class ConfigurationTemplate(troposphere.elasticbeanstalk.ConfigurationTemplate, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ApplicationName=REQUIRED, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
EnvironmentId=NOTHING, # type: Union[str, AWSHelperFn]
OptionSettings=NOTHING, # type: List[_OptionSettings]
PlatformArn=NOTHING, # type: Union[str, AWSHelperFn]
SolutionStackName=NOTHING, # type: Union[str, AWSHelperFn]
SourceConfiguration=NOTHING, # type: _SourceConfiguration
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ApplicationName=ApplicationName,
Description=Description,
EnvironmentId=EnvironmentId,
OptionSettings=OptionSettings,
PlatformArn=PlatformArn,
SolutionStackName=SolutionStackName,
SourceConfiguration=SourceConfiguration,
**kwargs
)
super(ConfigurationTemplate, self).__init__(**processed_kwargs)
class Tier(troposphere.elasticbeanstalk.Tier, Mixin):
def __init__(self,
title=None,
Name=NOTHING, # type: Any
Type=NOTHING, # type: Any
Version=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Name=Name,
Type=Type,
Version=Version,
**kwargs
)
super(Tier, self).__init__(**processed_kwargs)
class Environment(troposphere.elasticbeanstalk.Environment, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ApplicationName=REQUIRED, # type: Union[str, AWSHelperFn]
CNAMEPrefix=NOTHING, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
EnvironmentName=NOTHING, # type: Union[str, AWSHelperFn]
OptionSettings=NOTHING, # type: List[_OptionSettings]
PlatformArn=NOTHING, # type: Union[str, AWSHelperFn]
SolutionStackName=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: _Tags
TemplateName=NOTHING, # type: Union[str, AWSHelperFn]
Tier=NOTHING, # type: _Tier
VersionLabel=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ApplicationName=ApplicationName,
CNAMEPrefix=CNAMEPrefix,
Description=Description,
EnvironmentName=EnvironmentName,
OptionSettings=OptionSettings,
PlatformArn=PlatformArn,
SolutionStackName=SolutionStackName,
Tags=Tags,
TemplateName=TemplateName,
Tier=Tier,
VersionLabel=VersionLabel,
**kwargs
)
super(Environment, self).__init__(**processed_kwargs)
| [
"[email protected]"
] | |
ec5e8b11caa32c3c05e9e790c8640c5854a59efe | 308953409e1a3b828ac49b7301c1e751cbf762cf | /suite_ERC113C/tst_Offline_Value_Verification_After_Export/test.py | 0b61d6d28c8a932c2629a09e7db845a7ea357bd3 | [] | no_license | asthagaur1/danfoss-automation | 4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e | 213a99d3375889cd0e0c801421a50e9fe6085879 | refs/heads/main | 2023-03-31T23:26:56.956107 | 2021-04-01T08:52:37 | 2021-04-01T08:52:37 | 353,627,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | def main():
excel = r"C:\gitworkspace\TestAutomation-AKCC5XX\Test_Automation\SourceCode\Test_Suites\suite_ERC113C\shared\testdata\Offline_Export_Verifying_Values.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
source(findFile("scripts", "object_id.py"))
keyAction(excel) | [
"[email protected]"
] | |
693bf5679a15c573e4ebe87b0a134e654d96be1a | bdda458001808a029b171c09286f022a1384d180 | /crm/api/urls.py | 4432114d1a628d5ae2367f08e965ea609d506cbb | [] | no_license | bianchimro/crm-django | 4189f5c0c31f03d23a2b644a14403d63b8efdf0a | d8e4d18174cb050fd7a22d53fe8bb152e6e43120 | refs/heads/master | 2021-04-27T15:15:28.219887 | 2018-02-22T16:51:00 | 2018-02-22T16:51:00 | 122,466,604 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from django.urls import path
from .views import ExampleView, AziendaList, AziendaViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'aziende', AziendaViewSet)
urlpatterns = [
path('example/', ExampleView.as_view(), name="example"),
path('aziende_list/', AziendaList.as_view(), name="aziende_list"),
]
urlpatterns += router.urls
| [
"[email protected]"
] | |
8a86a57c6ba570a80e5a56773f4aacac0bdfff77 | cc64b1b5deb4530a5bd3eaabd98ebd4daa2deea1 | /Aulas/Exercícios-Mundo2/Aula014/Ex064.py | c503ec40e5b592f3dae6498fbbfd25ca252956e6 | [
"MIT"
] | permissive | Sofista23/Aula1_Python | 239b9920353138ff99d99dd0af66a4788f1cbb22 | 129132d977058ac6f23cc95c7bb8b55d8a1bb429 | refs/heads/main | 2023-09-01T23:55:20.529528 | 2021-10-13T23:19:33 | 2021-10-13T23:19:33 | 416,924,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | n=0
s=0
q=0
while n != 999:
n=int(input("Digite um número:"))
if n != 999:
s += n
q += 1
print("A soma de todos os números é {0}.".format(s))
print("A quantidade de números digitados foi de {0}.".format(q))
| [
"[email protected]"
] | |
ff25ffd31d796bc554c0a32e2113e547c0222a62 | cbd9b8f2dbd692d74eba6e92465e5f1dc1a807b3 | /ukpopulation/myedata.py | 33de0fab035316ed548c79e4507c2972d4735391 | [
"MIT"
] | permissive | geoadom/ukpopulation | 5a99f02c06c2b76464df2508a8f01f0b9ab0a803 | bfbd55097a4e9f458e2da6673a83576e37f5079b | refs/heads/master | 2020-03-21T07:59:37.195042 | 2018-06-21T15:02:31 | 2018-06-21T15:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | """
MYEData - wrapper around Mid-Year Estimate data by LAD, SYoA and gender
"""
import pandas as pd
import ukcensusapi.Nomisweb as Api
import ukpopulation.utils as utils
class MYEData:
"""
Functionality for downloading and collating UK mid-year estimate (MYE) data
Nomisweb stores the data for the entire UK, from 1991-2016 inclusive
"""
# update as and when necessary (this is unlike (S)NPP where we query the data for the year range)
# the data is stored differently at nomisweb (year is part of the query)
MIN_YEAR = 1991
MAX_YEAR = 2016
def __init__(self, cache_dir=None):
if cache_dir is None:
cache_dir = utils.default_cache_dir()
self.cache_dir = cache_dir
self.data_api = Api.Nomisweb(self.cache_dir)
# store as a dictionary keyed by year (lazy retrieval)
self.data = {}
def min_year(self):
"""
Returns the first year in the data
"""
return MYEData.MIN_YEAR
def max_year(self):
"""
Returns the final year in the data
"""
return MYEData.MAX_YEAR
  # TODO functionality for easy aggregation to E/W/EW/S/GB/NI/UK
def filter(self, years, geogs, ages=range(0,91), genders=[1,2]):
"""
    Get MYE data by single year of age and gender for the given years and geographies
"""
# ensure array inputs
if isinstance(years, int):
years = [years]
if isinstance(geogs, str):
geogs = [geogs]
if isinstance(ages, int):
ages = [ages]
if isinstance(genders, int):
genders = [genders]
result = pd.DataFrame()
for year in years:
# ensure the data is loaded
self.__fetch_data(year)
## ensure we return a copy!
part = self.data[year][(self.data[year].GEOGRAPHY_CODE.isin(geogs)) &
(self.data[year].C_AGE.isin(ages)) &
(self.data[year].GENDER.isin(genders))].copy()
part["PROJECTED_YEAR_NAME"] = year
result = result.append(part)
return result.reset_index(drop=True)
def aggregate(self, years, geog_codes, categories, ages=range(0,91), genders=[1,2]):
data = self.filter(years, geog_codes, ages, genders)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
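  # Illustrative usage (assumes network access to Nomisweb and a writable cache
  # directory; the LAD code is only an example):
  #   mye = MYEData()
  #   df = mye.filter(2016, "E06000047")  # single year of age by gender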
def __fetch_data(self, year):
"""
Gets Mid-year population estimate data for a given year
Data is by single year of age by gender by local authority
"""
# if data already loaded return
if year in self.data:
return
table_internal = "NM_2002_1" # 2016-based MYE
query_params = {
"gender": "1,2",
"c_age": "101...191",
"MEASURES": "20100",
"select": "geography_code,gender,c_age,obs_value",
"geography": "1879048193...1879048573,1879048583,1879048574...1879048582"
}
if year < MYEData.MIN_YEAR or year > MYEData.MAX_YEAR:
raise ValueError("{} is outside the available years for MYE data ({}-{})".format(year, MIN_YEAR, MAX_YEAR))
query_params["date"] = "latest"
if year < MYEData.MAX_YEAR:
query_params["date"] += "MINUS" + str(2016-year)
self.data[year] = self.data_api.get_data(table_internal, query_params)
# renumber age so that 0 means [0,1)
self.data[year].C_AGE -= 101
return self.data[year]
| [
"[email protected]"
] | |
ddc2a74972b55f12dc91815381d50be81c7ebf36 | ba0a2b0d2d1534443ea34320675aadfa378457b6 | /Array/Q1267_Count Servers that Communicate.py | bd2da1840b785f2af80b49ee05d2babff826b118 | [] | no_license | Luolingwei/LeetCode | 73abd58af116f3ec59fd6c76f662beb2a413586c | 79d4824879d0faed117eee9d99615cd478432a14 | refs/heads/master | 2021-08-08T17:45:19.215454 | 2021-06-17T17:03:15 | 2021-06-17T17:03:15 | 152,186,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 513 | py |
# Approach: count the servers in each row and each column; a server can communicate
# with another server exactly when its row or its column contains more than one server.
class Solution:
# O(mn)
def countServers(self, grid):
res=0
m,n=len(grid),len(grid[0])
row,col=list(map(sum,grid)),list(map(sum,zip(*grid)))
for i in range(m):
for j in range(n):
if grid[i][j] and (row[i]>1 or col[j]>1):
res+=1
return res
a=Solution()
print(a.countServers([[1,0],[0,1]])) | [
"[email protected]"
] | |
c7e969446f55baab789853c7141aee407cfb5de5 | b2403817f9221ee3550130572a808194ef4f3fda | /Excersise/DiscoverMonk.py | d83e2599dd6f66c342a4d3eafec2a9d392354bbd | [] | no_license | xaviergoby/Python-Data-Structure | e962444ef5b1313c3facbf1fcc315af182b73a26 | eaaf31ea98d63e812a75c1d6ecb8722b9c0cf142 | refs/heads/master | 2020-04-13T00:24:40.896592 | 2018-11-27T11:51:36 | 2018-11-27T11:51:36 | 162,844,732 | 1 | 0 | null | 2018-12-22T21:46:29 | 2018-12-22T21:46:29 | null | UTF-8 | Python | false | false | 284 | py | __author__ = 'Sanjay'
def monk(n, args = []):
someArray = range(0,50,10)
for i in args:
if i in someArray:
print ("YES")
else:
print ("NO")
if __name__ == '__main__':
someList = range(0,100,10)
monk(len(someList), someList)
| [
"[email protected]"
] | |
8453de52904329d3aaf8fd34272d1308db93de74 | 7d283f0762d2f85b04148e5db5acebc44dbba606 | /get_post_info_dl.py | 89d24f3e55214c8c843896a14624df05bc0e6664 | [] | no_license | Brandon-Valley/reddit_comp | 50dcd3571be1116bebb607c54e105096078c161f | ec618dc12b007a670fb4cc879554c4cf41796b62 | refs/heads/master | 2022-01-10T19:18:18.042008 | 2019-06-02T18:30:11 | 2019-06-02T18:30:11 | 188,881,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,408 | py | import subprocess
import json
import file_system_utils
# optional arguments:
# -h, --help show this help message and exit
# --directory DIRECTORY, -d DIRECTORY
# Specifies the directory where posts will be downloaded
# to
# --NoDownload Just gets the posts and stores them in a file for
# downloading later
# --verbose, -v Verbose Mode
# --quit, -q Auto quit afer the process finishes
# --link link, -l link Get posts from link
# --saved Triggers saved mode
# --submitted Gets posts of --user
# --upvoted Gets upvoted posts of --user
# --log LOG FILE Takes a log file which created by itself (json files),
# reads posts and tries downloading them again.
# --subreddit SUBREDDIT [SUBREDDIT ...]
# Triggers subreddit mode and takes subreddit's name
# without r/. use "frontpage" for frontpage
# --multireddit MULTIREDDIT
# Triggers multireddit mode and takes multireddit's name
# without m/
# --user redditor reddit username if needed. use "me" for current user
# --search query Searches for given query in given subreddits
# --sort SORT TYPE Either hot, top, new, controversial, rising or
# relevance default: hot
# --limit Limit default: unlimited
# --time TIME_LIMIT Either hour, day, week, month, year or all. default:
# all
EXE_PATH = "C:/Users/Brandon/Documents/Personal_Projects/reddit_comp/bulk_downloader_for_reddit-1.6.5-windows/bulk-downloader-for-reddit.exe "
LOG_FILES_SAVE_PATH = 'bulk_download_log_files'
DEFAULT_SORT_TYPE = 'hot'
def build_arg_str(num_posts, subreddit_l, sort_type = DEFAULT_SORT_TYPE):
# build_subreddit_l_str
subreddit_l_str = subreddit_l[0]
for subreddit in subreddit_l[1:]:
subreddit_l_str += '+' + subreddit
args = [' --directory ' + LOG_FILES_SAVE_PATH,
' --subreddit ' + subreddit_l_str,
' --limit ' + str(num_posts),
' --sort ' + sort_type,
' --NoDownload'
]
#build arg_str
arg_str = ''
for arg in args:
arg_str += arg
return arg_str
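# For example (illustrative values), build_arg_str(10, ['videomemes', 'pics']) returns:
# " --directory bulk_download_log_files --subreddit videomemes+pics --limit 10 --sort hot --NoDownload"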
def build_post_info_dl_from_json():
#get path to most recent json logfile
newest_log_file_dir = file_system_utils.get_newest_file_path(LOG_FILES_SAVE_PATH + '/LOG_FILES')
json_file_path = newest_log_file_dir + '/POSTS.json'
post_info_dl = []
# read in json file
with open(json_file_path) as json_file:
data = json.load(json_file)
# fill post_info_dl
post_num = 1
while(str(post_num) in data):
post_info_dl.append(data[str(post_num)][0])
post_num += 1
return post_info_dl
def get_post_info_dl(num_posts, subreddit_list, quick_test = False):
if quick_test == False:
exe_arg_str = build_arg_str(num_posts, subreddit_list)
cmd = EXE_PATH + exe_arg_str
subprocess.call(cmd, shell=True)
post_info_dl = build_post_info_dl_from_json()
return post_info_dl
# print( get_post_info_dl(4, ['videomemes', 'pics']))
| [
"[email protected]"
] | |
4d432a747f4aa1931de0a032b8c48e1b17d77e38 | b7add0d1b1effc50b27d3316fa5889a5227e5b19 | /Micropython/PYCARD/tests/archieved/hall_encoder_test_2.py | 30c908f586454c75ba6d4044967b71489fa1d5d2 | [] | no_license | Woz4tetra/Atlas | efb83a7c7b2698bf8b36b023f7aa573cc38284f6 | c7380868a9efef9d1594ed7aa87187f03a7e4612 | refs/heads/master | 2020-04-04T06:25:50.657631 | 2017-04-05T01:53:15 | 2017-04-05T01:53:15 | 50,269,756 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | import pyb
from objects import HallEncoder
pin = "X11"
mode = ""
while mode != "e" and mode != "r":
mode = input("Raw or encoder counts (r or e)?\n> ").lower()
if mode == "e":
encoder = HallEncoder(0, pin, 80, 100)
while True:
if encoder.recved_data():
print(encoder.enc_dist, encoder.hall_value)
pyb.delay(100)
elif mode == "r":
pin_ref = pyb.ADC(pyb.Pin(pin, pyb.Pin.ANALOG))
while True:
print(pin_ref.read())
pyb.delay(40)
| [
"[email protected]"
] | |
81db93b7457b194f44a05dbeb19bfbb07cbf8c1a | 6dedf9401746e934e87698f58eedb4631ea5f81b | /scrapy_demo/tesseract/demo.py | 5fe50242cefbd8841af0687901b25feedf89c539 | [] | no_license | 2415970940/scrapy | 0d03f9fe0cf121f637b7c5b03d328453c0ae35a7 | 5a1f4da0d92b3d80c1b95ed9fff0ab4bd02660bd | refs/heads/master | 2020-03-23T13:13:50.282118 | 2018-08-02T09:13:24 | 2018-08-02T09:13:24 | 141,607,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import pytesseract
from PIL import Image
pytesseract.pytesseract.tesseract_cmd = r"G:\progamapp\Tesseract-OCR\tesseract.exe"
tessdata_dir_config = '--tessdata-dir "G:\\progamapp\\Tesseract-OCR\\tessdata"'
image = Image.open("test.png")
text = pytesseract.image_to_string(image,config=tessdata_dir_config)
print(text) | [
"[email protected]"
] | |
1639881041340218040dcde824de76ea4b03ae92 | 58e02b9b88d5aed5acb3f544d1f92114c5468973 | /language/serene/training.py | 4e0df59a56e9d4c509e71f18ad0ea42a19a095f3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | alsuhr-c/language | bfb15e23b223c13c119cbb9840dc938571d0d7e0 | 2223b5c72a69d298f211c8d77bb7a2243af8fe49 | refs/heads/master | 2021-08-09T01:18:10.314001 | 2020-09-20T20:45:22 | 2020-09-20T20:45:22 | 221,727,017 | 0 | 0 | Apache-2.0 | 2020-07-20T20:15:23 | 2019-11-14T15:18:07 | Python | UTF-8 | Python | false | false | 31,036 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training manager for fever code."""
import json
import os
from typing import Any, Dict, List, Optional, Text
from absl import logging
import dataclasses
from language.serene import callbacks
from language.serene import fever_tfds
from language.serene import layers
from language.serene import losses
from language.serene import model
from language.serene import preprocessing
from language.serene import tokenizers
from language.serene import util
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tqdm
from official.utils.misc import tpu_lib
@dataclasses.dataclass
class ModelConfig:
"""Typed parameters for model."""
fever_experiment_id: int
model_checkpoint: Text
dataset: Text
buffer_size: int
batch_size: int
word_emb_size: int
hidden_size: int
learning_rate: float
positive_class_weight: Optional[float]
max_epochs: int
dropout: float
activation: Text
use_batch_norm: bool
# Model Choice: two_tower or one_tower (not implemented yet).
model: Text
# Preprocessing
tokenizer: Text # EG: Convert strings to list of strings.
text_encoder: Text # EG: Convert list of strings to integers.
basic_lowercase: bool
# Embedder + Contextualizer
embedder: Text
contextualizer: Text
context_num_layers: int
tied_encoders: bool
bidirectional: bool
bert_model_name: Text
bert_max_seq_length: int
bert_vocab_path: Text
bert_model_path: Text
bert_trainable: bool
bert_dropout: float
# Neural Module Configuration
matcher: Text
matcher_hidden_size: int
projection_dim: int
fever_dev_path: Text
max_evidence: int
max_claim_tokens: int
max_evidence_tokens: int
# Whether to include the title/sentence_id in evidence encoding.
include_title: bool
include_sentence_id: bool
n_similar_negatives: int
n_background_negatives: int
scrape_type: Text
include_not_enough_info: bool
title_in_scoring: bool
classify_claim: bool
claim_loss_weight: float
def validate(self):
"""Validate that the arguments to the config are correct, error if not."""
if self.tokenizer not in ['bert', 'basic']:
raise ValueError(f'Invalid tokenizer: "{self.tokenizer}"')
if self.text_encoder not in ['bert', 'basic']:
raise ValueError(f'Invalid text encoder: "{self.text_encoder}"')
if self.matcher not in layers.matcher_registry:
raise ValueError(f'Invalid matcher: "{self.matcher}"')
if self.contextualizer not in ['bert', 'rnn', 'lstm', 'gru']:
raise ValueError(f'Invalid contextualizer: "{self.contextualizer}"')
if self.model not in ['one_tower', 'two_tower']:
raise ValueError(f'Invalid model: "{self.model}"')
if self.bert_model_name not in ['base', 'large']:
raise ValueError(f'Invalid bert model: "{self.bert_model_name}')
if self.embedder not in ['classic_embedder', 'bert_embedder']:
raise ValueError(f'Invalid embedder: "{self.embedder}"')
@classmethod
def from_dict(cls, params):
return ModelConfig(**params)
@classmethod
def from_file(cls,
file_path,
overrides = None):
with util.safe_open(file_path) as f:
params: Dict[Text, Any] = json.load(f)
if overrides is not None:
params.update(overrides)
return ModelConfig.from_dict(params)
def save(self, file_path):
with util.safe_open(file_path, 'w') as f:
json.dump(self.asdict(), f)
def asdict(self):
return dataclasses.asdict(self)
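# Illustrative only (the path and override below are hypothetical): a config is
# usually restored from a saved checkpoint rather than constructed field by
# field, e.g.
#   config = ModelConfig.from_file(
#       '/path/to/checkpoint/model_config.json', overrides={'batch_size': 32})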
class Trainer:
"""Training wrapper around keras to manage vocab/saving/dataset creation.
The primary methods of this class are:
- train()
- predict()
- embed()
- save()
- load()
The intended use of this is
> trainer = Trainer(my_config)
> trainer.train()
The following methods are primarily for converting TFDS to tf.data.Dataset
for keras training
- _build_tokenizer()
- _build_encoder()
- _encode_and_batch()
- _batch_dataset()
- _encode_dataset()
- _build_vocab()
- _tokenize_example()
These are utilities for embedding different TFDSs
- embed_wiki_dataset()
- embed_claim_dataset()
The following methods deal with preparing the keras model for training
  - _compile(): Compile model under right scope, create callbacks, glue losses
to model
- _build_callbacks(): Keras callbacks
"""
def __init__(
self,
model_config,
debug = False,
tpu = None,
distribution_strategy = None,
tb_log_dir = None):
"""Configure the trainer.
Args:
model_config: ModelConfig parameters for training
debug: Enables certain debug behaviors like dataset subsampling
tpu: The TPU to use or None otherwise
distribution_strategy: Parallel training strategy
tb_log_dir: The directory for Tensorboard to log to
"""
self._debug = debug
if debug:
logging.info('Debug mode enabled on trainer')
self._tpu = tpu
self._distribution_strategy = distribution_strategy
self._tb_log_dir = tb_log_dir
self._strategy: Optional[tf.distribute.Strategy] = None
self._model_config = model_config
self._vocab: Optional[List[Text]] = None
self._vocab_stats: Dict[Text, Any] = {}
self._class_stats: Dict[int, int] = {0: 0, 1: 0}
# Whitespace tokenizer
self._tokenizer: Optional[tokenizers.Tokenizer] = None
self._encoder: Optional[preprocessing.FeverTextEncoder] = None
self._model: Optional[tf.keras.Model] = None
self._inner_model: Optional[tf.keras.Model] = None
def save(self):
"""Persist the encoder and the model to disk.
"""
if self._model is None or self._encoder is None:
raise ValueError('Model and encoder cannot be None')
else:
self._encoder.save_to_file(
# This is a prefix, which converts to: mydir/text_encoder.tokens
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
self._model.save_weights(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
@classmethod
def load(cls,
model_checkpoint,
model_config_overrides = None,
**kwargs):
"""Load the model, its tokenizer, and weights from the checkpoint.
Args:
model_checkpoint: Checkpoint to restore from, from .save()
model_config_overrides: Extra args for ModelConfig
**kwargs: Passed through to trainer, used for overriding checkpoint
Returns:
A model in the same state as just before it was saved with .save()
"""
# pylint: disable=protected-access
model_config = ModelConfig.from_file(
os.path.join(model_checkpoint, 'model_config.json'),
overrides=model_config_overrides)
trainer = Trainer(model_config=model_config, **kwargs)
trainer._tokenizer = trainer._build_tokenizer()
encoder_path = os.path.join(model_checkpoint, 'text_encoder')
if model_config.text_encoder == 'bert':
trainer._encoder = preprocessing.BertTextEncoder.load_from_file(
encoder_path)
elif model_config.text_encoder == 'basic':
trainer._encoder = preprocessing.BasicTextEncoder.load_from_file(
encoder_path)
else:
raise ValueError('Invalid text encoder')
trainer._compile()
if trainer._model is None:
raise ValueError('Model does not exist despite being compiled')
trainer._model.load_weights(os.path.join(model_checkpoint, 'best_model.tf'))
return trainer
def _save_model_config(self):
"""Save only the Model configuration to disk."""
logging.info('Saving config to: %s/model_config.json',
self._model_config.model_checkpoint)
self._model_config.save(
os.path.join(self._model_config.model_checkpoint, 'model_config.json'))
def _save_encoder(self):
"""Save only the text encoder to disk."""
self._encoder.save_to_file(
os.path.join(self._model_config.model_checkpoint, 'text_encoder'))
@property
def vocab_size(self):
if self._encoder is None:
      raise ValueError('Model has not been built, so no vocab size')
else:
return self._encoder.vocab_size
def _init_strategy(self):
"""Initialize the distribution strategy (e.g. TPU/GPU/Mirrored)."""
if self._strategy is None:
if self._tpu is not None:
resolver = tpu_lib.tpu_initialize(self._tpu)
self._strategy = tf.distribute.experimental.TPUStrategy(resolver)
elif self._distribution_strategy is None or self._distribution_strategy == 'default':
self._strategy = tf.distribute.get_strategy()
elif self._distribution_strategy == 'cpu':
self._strategy = tf.distribute.OneDeviceStrategy('/device:cpu:0')
else:
if self._distribution_strategy == 'mirrored':
self._strategy = tf.distribute.MirroredStrategy()
else:
raise ValueError(
f'Invalid distribution strategy="{self._distribution_strategy}"')
def _build_tokenizer(self):
"""Build the correct tokenizer depending on model encoder.
Returns:
Tokenizer for model
"""
if self._model_config.tokenizer == 'basic':
base_tokenizer = tfds.deprecated.text.Tokenizer()
return tokenizers.ReservedTokenizer(
tokenizer=base_tokenizer, reserved_re=preprocessing.SEPARATOR_RE)
elif self._model_config.tokenizer == 'bert':
return tokenizers.BertTokenizer(
vocab_file=self._model_config.bert_vocab_path, do_lower_case=True)
else:
raise ValueError('Invalid tokenizer')
def _build_encoder(self, vocab,
tokenizer):
"""Build the encoder using the given vocab and tokenizer.
Args:
vocab: Vocab to build encoder from
tokenizer: Tokenizer to build encoder from
Returns:
The built text encoder
"""
if self._model_config.text_encoder == 'basic':
return preprocessing.BasicTextEncoder(
vocab_list=vocab,
tokenizer=tokenizer,
lowercase=self._model_config.basic_lowercase,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
max_claim_tokens=self._model_config.max_claim_tokens,
max_evidence_tokens=self._model_config.max_evidence_tokens,
)
elif self._model_config.text_encoder == 'bert':
return preprocessing.BertTextEncoder(
tokenizer=tokenizer,
max_seq_length=self._model_config.bert_max_seq_length,
include_title=self._model_config.include_title,
include_sentence_id=self._model_config.include_sentence_id,
)
def _encode_and_batch(self,
dataset,
train=False,
filter_claims=True,
filter_evidence=True):
"""Convert a tensorflow dataset of unbatched, text examples to TF batches.
Args:
dataset: TF Dataset to transform
train: Whether to encode as training dataset
filter_claims: Whether to filter zero length claims
filter_evidence: Whether to filter zero length evidence
Returns:
encoded and batched dataset for keras fit
"""
encoded = self._encode_dataset(
dataset, filter_claims=filter_claims, filter_evidence=filter_evidence)
if train:
encoded = encoded.shuffle(
self._model_config.buffer_size, reshuffle_each_iteration=False)
batched = self._batch_dataset(encoded)
return batched
def _compile(self):
"""Compile the keras model using the correct scope."""
# pylint: disable=protected-access
self._init_strategy()
with self._strategy.scope():
if self._model_config.model == 'two_tower':
module_model = model.TwoTowerRanker(
self.vocab_size,
activation=self._model_config.activation,
matcher_name=self._model_config.matcher,
word_emb_size=self._model_config.word_emb_size,
hidden_size=self._model_config.hidden_size,
dropout=self._model_config.dropout,
use_batch_norm=self._model_config.use_batch_norm,
contextualizer=self._model_config.contextualizer,
context_num_layers=self._model_config.context_num_layers,
bidirectional=self._model_config.bidirectional,
tied_encoders=self._model_config.tied_encoders,
embedder_name=self._model_config.embedder,
matcher_hidden_size=self._model_config.matcher_hidden_size,
bert_model_name=self._model_config.bert_model_name,
bert_model_path=self._model_config.bert_model_path,
bert_trainable=self._model_config.bert_trainable,
bert_dropout=self._model_config.bert_dropout,
projection_dim=self._model_config.projection_dim,
classify_claim=self._model_config.classify_claim,
)
self._inner_model = module_model
# This hackery is necessary since keras doesn't handle dictionary inputs
        # well, so we have to manually specify input/output shapes. Since
# this is dependent on the model (e.g., bert vs other), let the encoder
# yield this.
inputs = self._encoder.compute_input_shapes()
outputs = module_model(inputs)
module_model.input_names = sorted(inputs.keys())
module_model._feed_input_names = sorted(inputs.keys())
module_model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model = tf.keras.Model(inputs=inputs, outputs=outputs)
self._model.input_names = sorted(inputs.keys())
self._model._feed_input_names = sorted(inputs.keys())
self._model.output_names = sorted(
['evidence_matching', 'claim_classification'])
self._model.summary(line_length=500)
elif self._model_config.model == 'one_tower':
raise NotImplementedError()
else:
raise ValueError('Invalid model')
metrics = {}
evidence_metrics = [
tf.keras.metrics.BinaryAccuracy(name='accuracy'),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.TruePositives(name='tp'),
tf.keras.metrics.FalsePositives(name='fp'),
tf.keras.metrics.TrueNegatives(name='tn'),
tf.keras.metrics.FalsePositives(name='fn'),
]
metrics['evidence_matching'] = evidence_metrics
loss = {}
loss['evidence_matching'] = losses.WeightedBinaryCrossentropyFromProbs(
positive_class_weight=self._model_config.positive_class_weight)
loss_weights = {
'evidence_matching': 1.0,
'claim_classification': self._model_config.claim_loss_weight
}
if self._model_config.classify_claim:
# TODO(perodriguez): add claim classifier metrics
claim_metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
]
metrics['claim_classification'] = claim_metrics
loss[
'claim_classification'] = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False)
else:
loss['claim_classification'] = losses.ZeroLoss()
metrics['claim_classification'] = []
self._model.compile(
loss=loss,
optimizer=tf.keras.optimizers.Adam(self._model_config.learning_rate),
metrics=metrics,
loss_weights=loss_weights,
)
def train(self,
*,
epochs = None,
steps_per_epoch = None,
validation_steps = None):
"""Prepare the dataset, callbacks, and model, then train/save it.
Args:
epochs: The number of epochs to train for, if None then default to
early stopping (useful for debugging)
steps_per_epoch: How many training steps to take, if None default to
normal training (useful for debugging)
      validation_steps: How many validation steps to take, if None default to
normal training (useful for debugging)
"""
logging.info('Preparing model with config:\n%s', self._model_config)
with util.log_time('Initial dataset read'):
builder = fever_tfds.FeverEvidence(
data_dir=self._model_config.dataset,
n_similar_negatives=self._model_config.n_similar_negatives,
n_background_negatives=self._model_config.n_background_negatives,
train_scrape_type=self._model_config.scrape_type,
include_not_enough_info=self._model_config.include_not_enough_info,
title_in_scoring=self._model_config.title_in_scoring,
)
# Cache here to prevent hitting remote fs again
train_dataset = (builder.as_dataset(split='train')).cache()
val_dataset = builder.as_dataset(split='validation').cache()
if self._debug:
train_dataset = train_dataset.take(1000)
if self._debug:
val_dataset = val_dataset.take(200)
self._tokenizer = self._build_tokenizer()
self._vocab = list(self._build_vocab(train_dataset))
self._encoder = self._build_encoder(self._vocab, self._tokenizer)
train_batched = self._encode_and_batch(train_dataset, train=True)
val_batched = self._encode_and_batch(val_dataset, train=False)
# Cache the batch creation, but not the batchwise shuffle.
train_batched = train_batched.cache().shuffle(
100,
reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)
# Cache the batched validation data.
val_batched = val_batched.cache().prefetch(tf.data.experimental.AUTOTUNE)
self._compile()
model_callbacks = self._build_callbacks(val_batched)
# Save enough to reconstruct anything except for the model.
# The model itself is saved with the ModelCheckpoint callback.
self._save_model_config()
self._save_encoder()
if epochs is None:
epochs = self._model_config.max_epochs
self._model.fit(
train_batched,
validation_data=val_batched,
callbacks=model_callbacks,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
logging.info('Model Summary:\n%s', self._model.summary())
# First load the best model.
logging.info('Loading best model weights')
self._model.load_weights(self.model_weight_path)
logging.info('Saving dev predictions from best model')
self._save_dev_predictions(val_batched)
@property
def model_weight_path(self):
return os.path.join(self._model_config.model_checkpoint, 'best_model.tf')
def _save_dev_predictions(self, val_batched):
"""Save model predictions for the dev set.
    This is used to compute Fever F1 as a stopping metric.
Args:
val_batched: The batched validation set.
"""
unbatched = val_batched.unbatch()
model_predictions = self._model.predict(val_batched)
claim_probs = model_predictions['claim_classification']
evidence_probs = model_predictions['evidence_matching']
predictions = []
# Extra _ is the label, which we don't need
for (ex, _), claim_prob, evidence_prob in tqdm.tqdm(
zip(unbatched, claim_probs, evidence_probs), mininterval=5):
predictions.append({
'claim_prob': claim_prob.tolist(),
'evidence_prob': evidence_prob.tolist(),
'metadata': json.loads(ex['metadata'].numpy().decode('utf8'))
})
pred_path = os.path.join(self._model_config.model_checkpoint,
'val_predictions.json')
with util.safe_open(pred_path, 'w') as f:
json.dump({'predictions': predictions}, f)
def predict(self, examples):
"""Given examples in JSON format, predict evidence relevance.
Args:
examples: List of claim/evidence pairs to rank
Returns:
Scalar scores for each pair
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for ex in examples:
stacked['claim_text'].append(ex['claim_text'])
stacked['evidence_text'].append(ex['evidence_text'])
stacked['metadata'].append(ex['metadata'])
stacked['label'].append(ex['label'])
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
preds = []
for batch in batched_examples:
# model.predict() is broken after model load so we have to do this
# manually.
preds.append(self._model(batch))
return np.vstack(preds).reshape(-1).tolist()
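  # Each element of `examples` must carry the keys used by the stacking code in
  # predict() above; an illustrative element (values are made up) looks like:
  #   {'claim_text': 'A claim.', 'evidence_text': 'Some evidence.',
  #    'metadata': '{}', 'label': 0}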
def embed(self, examples, *, as_claim,
as_evidence): # Checker .tolist() -> Any
"""Embed a list of evidence text.
Args:
examples: A list of evidence text to embed.
as_claim: Whether to embed examples as claims
as_evidence: Whether to embed examples as evidence
Returns:
A list of embeddings, one for each evidence text.
"""
stacked = {
'claim_text': [],
'evidence_text': [],
'metadata': [],
'label': [],
}
for text in examples:
      # Dummy value to make sure tokenizing works.
if as_claim:
stacked['claim_text'].append(text)
else:
stacked['claim_text'].append('a')
if as_evidence:
stacked['evidence_text'].append(text)
else:
stacked['evidence_text'].append('a')
stacked['metadata'].append('')
stacked['label'].append(tf.constant(0, dtype=tf.int64))
dataset = tf.data.Dataset.from_tensor_slices((stacked,))
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_preds = []
ev_preds = []
for batch in batched_examples:
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
claim_encoding, ev_encoding = self._model(
inputs, embed_claim=as_claim, embed_evidence=as_evidence)
claim_preds.append(claim_encoding)
ev_preds.append(ev_encoding)
return np.vstack(claim_preds).tolist(), np.vstack(ev_preds).tolist()
def embed_wiki_dataset(self, dataset):
"""Embed the wikipedia/evidence only dataset.
Args:
dataset: The wikipedia only dataset (e.g. wiki_tfds.py)
Returns:
Aligned wikipedia_urls, sentence_ids, and embeddings of model
"""
# map_fn and tf_map_fn transform the dataset to the same format as
# tfds_evidence/the one the model expects
def map_fn(text, wikipedia_url, sentence_id):
return ('a', text, wikipedia_url, str(sentence_id),
json.dumps({
'sentence_id': int(sentence_id.numpy()),
'wikipedia_url': wikipedia_url.numpy().decode('utf8')
}))
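    # tf.py_function executes map_fn eagerly, which is what makes the .numpy() /
    # .decode() calls above legal on tensors flowing through the tf.data pipeline.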
def tf_map_fn(example):
tensors = tf.py_function(
map_fn,
inp=[
example['text'], example['wikipedia_url'], example['sentence_id']
],
Tout=(tf.string, tf.string, tf.string, tf.string, tf.string))
return {
'claim_text': tensors[0],
'evidence_text': tensors[1],
'wikipedia_url': tensors[2],
'sentence_id': tensors[3],
'claim_label': tf.constant(0, dtype=tf.int64),
'evidence_label': tf.constant(0, dtype=tf.int64),
'metadata': tensors[4]
}
formatted_ds = dataset.map(tf_map_fn)
batched_examples = self._encode_and_batch(
formatted_ds, filter_claims=False, filter_evidence=False)
preds = []
wikipedia_urls = []
sentence_ids = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
_, ev_encoding = self._inner_model(
inputs, embed_claim=False, embed_evidence=True)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
wikipedia_urls.append(key['wikipedia_url'])
sentence_ids.append(key['sentence_id'])
preds.append(ev_encoding)
return np.array(wikipedia_urls), np.array(sentence_ids), np.vstack(preds)
def embed_claim_dataset(self, dataset):
"""Embed the claim only dataset and save them with claim_ids.
Args:
dataset: The claims only dataset (e.g. claim_tfds.py)
Returns:
Aligned claim ids and embeddings from the model
"""
batched_examples = self._encode_and_batch(
dataset, filter_claims=False, filter_evidence=False)
claim_ids = []
embeddings = []
for batch in tqdm.tqdm(batched_examples, mininterval=5):
# model.predict() is broken after model load due to missing shapes, so
# have to do our own batching/unbatching.
inputs, _ = batch
# Cannot use self._model since it does not take extra arguments. Since
      # we're not using the keras API (namely .predict()), we can just use the
# underlying model stored in self._inner_model.
claim_encoding, _ = self._inner_model(
inputs, embed_claim=True, embed_evidence=False)
for m in inputs['metadata'].numpy():
key = json.loads(m.decode('utf8'))
claim_ids.append(int(key['claim_id']))
embeddings.append(claim_encoding)
return np.array(claim_ids), np.vstack(embeddings)
def _build_callbacks(self, val_batched):
"""Build the callbacks used during training."""
cns_model_checkpoint = util.safe_path(
os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))
model_callbacks = [
# Note: Order matters here, particularly that FeverMetricsCallback
# comes before tensorboard so it can write to the log dictionary
# and TB picks it up.
callbacks.FeverMetricsCallback(
validation_batched=val_batched,
debug=self._debug,
fever_dev_path=self._model_config.fever_dev_path,
max_evidence=self._model_config.max_evidence,
checkpoint_dir=self._model_config.model_checkpoint,
),
# TODO(perodriguez): Determine a better thing to stop on
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=.001,
patience=3,
verbose=1,
mode='min'),
# TODO(perodriguez): Determine a better thing to save on
# Checkpointing also needs to know about fever recall.
tf.keras.callbacks.ModelCheckpoint(
filepath=cns_model_checkpoint,
save_best_only=True,
monitor='val_loss',
mode='min',
verbose=1,
# There is no support for GRU/LSTM Dropout with normal save
save_weights_only=True,
),
]
if self._tb_log_dir is not None:
model_callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=self._tb_log_dir))
return model_callbacks
def _batch_dataset(self, dataset):
"""Batch the dataset depending on what model is used.
Args:
dataset: A dataset to batch
Returns:
A batched dataset with correct padding shapes.
"""
return dataset.padded_batch(
batch_size=self._model_config.batch_size,
padded_shapes=(
self._encoder.padded_shapes(),
# Must match losses in training.py
{
'claim_classification': [],
'evidence_matching': []
}))
def _encode_dataset(self,
dataset,
filter_claims=True,
filter_evidence=True):
"""Convert the tfds dataset to numbers by tokenizing/embedding."""
encode = self._encoder.build_encoder_fn()
encoded_data = dataset.map(
encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if filter_claims:
encoded_data = encoded_data.filter(preprocessing.filter_claim_fn)
if filter_evidence:
encoded_data = encoded_data.filter(preprocessing.filter_evidence_fn)
return encoded_data
def _build_vocab(self, dataset):
"""Build the vocabulary and encoder from the dataset.
Args:
dataset: The dataset to build vocab from.
Returns:
The vocabulary in the dataset, or empty vocab if using bert
"""
# If we are using bert, then we do not need to build the vocab
# since its already defined
if self._model_config.tokenizer == 'bert' and self._model_config.text_encoder == 'bert':
logging.info('Using bert, skipping vocabulary creation')
return set()
if self._tokenizer is None:
raise ValueError('Cannot build vocab without a tokenizer.')
claim_lengths = []
evidence_lengths = []
vocab = set()
for example in tqdm.tqdm(dataset, mininterval=5):
tokenized_claim, tokenized_evidence = self._tokenize_example(example)
claim_lengths.append(len(tokenized_claim))
evidence_lengths.append(len(tokenized_evidence))
vocab.update(tokenized_claim)
vocab.update(tokenized_evidence)
    logging.info('Built vocab of size (without padding): %s', len(vocab))
logging.info('Claim length statistics')
logging.info('Max: %s', max(claim_lengths))
logging.info('Min: %s', min(claim_lengths))
claim_percentiles = np.percentile(claim_lengths, [50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(claim_percentiles))
logging.info('Evidence length statistics')
logging.info('Max: %s', max(evidence_lengths))
logging.info('Min: %s', min(evidence_lengths))
evidence_percentiles = np.percentile(evidence_lengths,
[50, 90, 95, 99]).tolist()
logging.info('50/90/95/99: %s', str(evidence_percentiles))
self._vocab_stats['claim_max'] = max(claim_lengths)
self._vocab_stats['claim_min'] = min(claim_lengths)
self._vocab_stats['claim_percentiles'] = claim_percentiles
self._vocab_stats['evidence_max'] = max(evidence_lengths)
self._vocab_stats['evidence_min'] = min(evidence_lengths)
self._vocab_stats['evidence_percentiles'] = evidence_percentiles
return vocab
def _tokenize_example(self, example):
tokenized_claim = self._tokenizer.tokenize(
example['claim_text'].numpy().decode('utf8'))
tokenized_evidence = self._tokenizer.tokenize(
example['evidence_text'].numpy().decode('utf8'))
return tokenized_claim, tokenized_evidence
| [
"[email protected]"
] | |
7c7fc0f5567572ab0c07b65e752550907fbbcd9e | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_coadd.py | 4c2a173777392d114cb21ec9fb51411e53878edf | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[207.933875,12.7155], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj_135144.13+124255.8/sdB_sdssj_135144.13+124255.8_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ed785e2b5af05ae400c0d35d937f9c33162497ac | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC59.py | 2688aa43ef87eefbaedb8591966863e235cc4be4 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
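    # The grid search above selects the (gamma, beta) pair that maximizes F1 over the
    # grid; make_circuit() reads gamma, beta, E and V as module-level globals, so they
    # must be defined before the call below.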
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC59.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
7b9e7785e184e54395de1a8b8168e9e9f6dd5da5 | 3604693215eeac8667a2d140cf0881ec8c3461c4 | /TA1_solution.py | eafff85dea49f69746c0240874fd1d89c4227619 | [] | no_license | SanjuktaBhatt/G11_C5 | 7ded1547a5939442f8e6f0a7bcbba065e0ef38e2 | 2b35462c67bf3a4cfdc3a3a7b257531352304d2a | refs/heads/main | 2023-09-04T20:14:41.379654 | 2021-10-28T06:42:10 | 2021-10-28T06:42:10 | 381,617,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | maria_busy=True
while maria_busy:
print("Keep decorating!")
maria_busy=False
print("Get ready to surprise!")
| [
"[email protected]"
] | |
b80afaa7949dc6770d059a0287bbc3a17570e136 | ac09dbe531660a8f5ea6d0ab9b9496db856fe88e | /rules/download_data.smk | bd1e90cfe76baacaac35853c849070aadbdbe100 | [
"MIT"
] | permissive | EthanHolleman/GLOE-reps | fc203c0676d474e1477318c18251eb0ff1ac0fc3 | 196e98eb8cf5fb591ae02b5e999d3562c46b81c2 | refs/heads/main | 2023-04-09T18:14:23.009901 | 2021-04-15T01:53:33 | 2021-04-15T01:53:33 | 354,414,057 | 0 | 0 | MIT | 2021-04-04T17:57:18 | 2021-04-03T23:18:50 | Python | UTF-8 | Python | false | false | 1,745 | smk | import pandas as pd
GLOE_SAMPLES = pd.read_csv(
'samples/GLOE_samples.csv', sep=','
).set_index('Sample Name', drop=False)
# Download GLOE-seq data and process into fastq
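# The expand_gloe_samples rule below is just an aggregate target: requesting it makes
# Snakemake run the prefetch rule once per sample in GLOE_samples.csv, while the
# .fastq.gz conversion in dump_gloe_fastq is pulled in when downstream outputs need it.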
rule expand_gloe_samples:
input:
expand('rawdata/GLOE-seq/{sample_name}.sra', sample_name=GLOE_SAMPLES['Sample Name'])
rule download_all_gloe_samples:
conda:
'../envs/sra-toolkit.yml'
params:
sra_accession = lambda wildcards: GLOE_SAMPLES.loc[wildcards.sample_name]['Run'],
output:
temp('rawdata/GLOE-seq/{sample_name}.sra')
shell:'''
prefetch {params.sra_accession} --output-file {output}
'''
rule dump_gloe_fastq:
input:
'rawdata/GLOE-seq/{sample}.sra'
output:
'rawdata/GLOE-seq/{sample}.fastq.gz'
shell:'''
fastq-dump -Z {input} | gzip > {output}
'''
# Download primers
rule download_primer_file:
output:
'rawdata/primers/TruSeq3-SE.fa'
shell:'''
curl https://raw.githubusercontent.com/timflutre/trimmomatic/master/adapters/TruSeq3-SE.fa \
-o {output}
'''
rule download_hg19_chr_sizes:
output:
'rawdata/hg19/hg19.chrom.sizes'
shell:'''
curl -L http://hgdownload.cse.ucsc.edu/goldenpath/hg19/bigZips/hg19.chrom.sizes -o {output}
'''
# Download footloop data
rule download_footloop_all:
output:
'rawdata/footloop/footloop_all.bed'
shell:'''
curl -L "https://genome.ucsc.edu/cgi-bin/hgTables?hgsid=1079385889_dXqdbBP5Hsal2siu4fVmefmsWOgX&boolshad.hgta_printCustomTrackHeaders=0&hgta_ctName=tb_ct_footLoopPeakALL_41&hgta_ctDesc=table+browser+query+on+ct_footLoopPeakALL_41&hgta_ctVis=pack&hgta_ctUrl=&fbQual=whole&fbUpBases=200&fbDownBases=200&hgta_doGetBed=get+BED" -o {output}
'''
| [
"[email protected]"
] | |
126b2a8c33e68656967eae19d2378d22a7ea2628 | 242918b007e06cbebbf5b276580a0ed89d0020fa | /thrift/compiler/test/fixtures/inheritance/gen-py3/module/services.pyi | f696015594a83586fbfc4424fc7df5a066a60980 | [
"Apache-2.0"
] | permissive | wby-test/fbthrift | 2a16ce45d94d3961936300f5b81098cf345360e6 | 8c50c4ac75ec16ebcd2485ca886b49d3ea55515e | refs/heads/master | 2022-08-03T08:30:40.931712 | 2017-11-28T06:28:36 | 2017-11-28T06:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import typing as _typing
from thrift.py3.server import RequestContext, ServiceInterface
import module.types as _module_types
class MyRootInterface(
ServiceInterface
):
@_typing.overload
async def do_root(
self,
ctx: RequestContext
) -> None: ...
async def do_root(
self
) -> None: ...
class MyNodeInterface(
_module_services.MyRootInterface
):
@_typing.overload
async def do_mid(
self,
ctx: RequestContext
) -> None: ...
async def do_mid(
self
) -> None: ...
class MyLeafInterface(
_module_services.MyNodeInterface
):
@_typing.overload
async def do_leaf(
self,
ctx: RequestContext
) -> None: ...
async def do_leaf(
self
) -> None: ...
| [
"[email protected]"
] | |
09f0bceefb59bb73736f9993fabbf1ed9b32640e | 9766c2e479e99cca5bf7cc834c949fc4d5286275 | /SRC/engine/element.spy | c45a0915e20516a294ba71e27364b627bf1d54c2 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | UstbCmsPjy/OOF2 | 4c141e8da3c7e3c5bc9129c2cb27ed301455a155 | f8539080529d257a02b8f5cc44040637387ed9a1 | refs/heads/master | 2023-05-05T09:58:22.597997 | 2020-05-28T23:05:30 | 2020-05-28T23:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | spy | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
def _Element__position(self, coords):
return map(self.from_master, coords)
ElementPtr.position = _Element__position
from ooflib.SWIG.engine.masterelement import MasterElementPtr
from ooflib.SWIG.common.coord import CoordPtr
from ooflib.SWIG.engine.mastercoord import MasterCoordPtr
from ooflib.SWIG.engine.edge import BoundaryEdgePtr
| [
"[email protected]"
] | |
8d6306d191fe7def474de55a4085373419f1808d | b9dcea5142af620b651fdfac05ffcac021ef6f83 | /heroku_deploy/settings.py | 0c8efb0f8c32a6b2ddba5192ab7b8e4fb168aedd | [] | no_license | Jordan-Ak/heroku_deployment | aee7c1f721a51831329fef244a48fc7d65c3d0fc | ada1568a1c120903e400ba190f9a8fac3fb86d77 | refs/heads/master | 2023-03-29T12:16:30.680308 | 2021-02-23T08:16:58 | 2021-02-23T08:16:58 | 339,159,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | """
Django settings for heroku_deploy project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kg%s)6nnp0+b%k=i7e3xgjawp16z3=9@x(_m#_(_s=40$g5m*1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['safe-chamber-01830.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#My apps
'deploys',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heroku_deploy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heroku_deploy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
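# NOTE: 'whitenoise.django.GzipManifestStaticFilesStorage' only exists in WhiteNoise
# releases before 4.0; on newer versions the rough equivalent is:
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'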
#Database configuration
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db) | [
"[email protected]"
] | |
31061a65eab30817d6bac7d2d11b6ae2e84bf634 | e6f73cc3398050b23df28e3f11a10afbb46ee38b | /idea/chain_iter/great.py | 9ef7b72eb2ef2aebef5e53d23cfc96e52856e86a | [] | no_license | monarin/divelite | 3db262bf07a0de870d0bfe650ebdf21225b88c1b | 0d297bda7368c5295336565431fbfa18a5686f15 | refs/heads/master | 2023-06-29T23:42:34.541874 | 2023-06-08T17:59:59 | 2023-06-08T17:59:59 | 120,695,376 | 0 | 1 | null | 2018-09-06T00:03:30 | 2018-02-08T01:41:19 | C++ | UTF-8 | Python | false | false | 1,054 | py | class Step:
def __init__(self,chunks):
self.chunks = chunks
def events(self):
        for i, chunk in enumerate(self.chunks):
print(f'events() chunk {i}')
for dg in chunk:
if dg==102: return
if dg<100: yield dg
chunks = [iter([101,1,2,3,102,101,4,5,102,101,6]),iter([7,8,102,101,9,10])]
class Run:
def __init__(self):
pass
def events(self):
for chunk in chunks:
for dg in chunk:
if dg<100: yield dg
def steps(self):
for chunk in chunks:
for dg in chunk:
if dg==101: yield Step(chunks)
myrun = Run()
#for evt in myrun.events():
# print(evt)
for istep,step in enumerate(myrun.steps()):
print('step:',istep)
for evt in step.events():
print(evt)
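# Hand-traced expected output, assuming the shared chunk iterators behave as set up
# above (each Step resumes the same iterators the steps() generator left off on):
#   step: 0 -> events() chunk 0 -> 1 2 3
#   step: 1 -> events() chunk 0 -> 4 5
#   step: 2 -> events() chunk 0 -> 6, then events() chunk 1 -> 7 8
#   step: 3 -> events() chunk 0 (already empty), events() chunk 1 -> 9 10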
| [
"[email protected]"
] | |
f9644cf63ba76c1285a5a362ae49db9da36c072d | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_16052.py | ef3296d91a540d8831255e1a96840896b72cfa8c | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,843 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((352.463, 469.079, 509.037), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((409.222, 449.195, 522.743), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((492.237, 446.886, 544.215), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((413.797, 543.405, 607.247), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((687.523, 413.102, 555.67), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((398.822, 468.584, 510.435), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((398.385, 469.641, 509.134), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((411.171, 482.142, 487.69), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((411.254, 501.272, 466.984), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((430.691, 520.729, 461.297), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((429.462, 538.628, 439.679), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((403.448, 539.246, 450.212), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((374.544, 453.872, 507.792), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((432.859, 619.991, 387.562), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((605.419, 524.709, 432.249), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((605.419, 524.709, 432.249), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((585.283, 505.346, 436.949), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((565.966, 486.066, 444.1), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((538.347, 483.318, 449.587), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((517.296, 472.977, 465.542), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((490.807, 469.282, 475.056), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((465.402, 463.364, 486.656), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((581.844, 675.271, 397.552), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((330.666, 258.472, 574.594), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((486.399, 435.61, 498.969), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((486.399, 435.61, 498.969), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((482.785, 459.266, 514.82), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((477.362, 476.059, 537.526), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((482.759, 472.07, 565.487), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((358.907, 486.453, 564.415), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((606.76, 457.202, 572.692), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((404.881, 470.855, 541.854), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((404.89, 470.878, 541.865), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((428.992, 473.142, 523.35), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((433.502, 458.721, 497.623), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((439.571, 453.063, 469.191), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((435.205, 469.322, 445.925), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((420.331, 487.98, 430.413), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((404.427, 511.312, 432.331), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((422.823, 430.126, 451.356), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((386.643, 595.882, 416.921), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((431.079, 405.034, 489.233), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((436.007, 421.364, 509.242), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((448.694, 457.188, 552.471), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((461.339, 493.01, 595.697), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((386.894, 472.908, 620.709), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((533.738, 557.235, 634.873), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((471.119, 433.752, 467.911), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((494.266, 417.148, 471.636), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((510.32, 411.76, 494.28), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((516.393, 407.226, 521.62), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((496.239, 409.823, 541.053), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((496.895, 410.969, 569.174), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((428.284, 432.604, 531.352), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((565.447, 389.538, 607.451), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
f2b92c95db0379a8834ace8efc29165dfbec2f75 | 6569f43b525305a8899b920b8e58aab413feb519 | /CommitteApp/migrations/0001_initial.py | b35fc854aca31273b4892e95c7c63a3797207735 | [] | no_license | sontus-tripura-python/tsfbd | daa6b19f2dae8eaf8fd9c5a5c412d7cc9606a381 | 5f851c2616e912d0af1addaaeb8e64167eed9501 | refs/heads/main | 2023-04-25T08:36:59.288577 | 2021-05-07T05:13:28 | 2021-05-07T05:13:28 | 242,639,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,016 | py | # Generated by Django 3.1.5 on 2021-03-26 13:47
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BranchCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
],
),
migrations.CreateModel(
name='BranchName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('branchname', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='branchname')),
('branch_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='branch_categories', to='CommitteApp.branchcategory')),
],
options={
'verbose_name_plural': 'Branch Name',
},
),
migrations.CreateModel(
name='CentralYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('yearname', models.CharField(max_length=30)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='yearname')),
],
options={
'verbose_name_plural': 'central year',
},
),
migrations.CreateModel(
name='Coordinator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='branchmember')),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=200)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('about_description', models.TextField()),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
],
options={
'verbose_name_plural': 'Co-ordinator',
},
),
migrations.CreateModel(
name='CentralMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='central')),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=50)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('village', models.CharField(blank=True, max_length=200)),
('thana', models.CharField(blank=True, max_length=200)),
('district', models.CharField(blank=True, max_length=200)),
('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], default='Male', max_length=20)),
('current_enroll', models.CharField(choices=[('University', 'University'), ('College', 'College'), ('School', 'School'), ('Job', 'Job'), ('Other', 'Other')], max_length=200, null=True)),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
('session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.centralyear')),
],
options={
'verbose_name_plural': 'Central Member',
},
),
migrations.CreateModel(
name='BranchYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('branchyear', models.CharField(max_length=200)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='branchyear')),
('branches', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchname')),
],
options={
'verbose_name_plural': 'Branch year',
},
),
migrations.CreateModel(
name='BranchMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, default='default.jpg', upload_to='branchmember')),
('University', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=50)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('position', models.CharField(blank=True, max_length=50)),
('blood_group', models.CharField(blank=True, max_length=20)),
('phone', models.CharField(blank=True, max_length=11)),
('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], default='Male', max_length=20)),
('current_enroll', models.CharField(choices=[('University', 'University'), ('College', 'College'), ('School', 'School'), ('Job', 'Job'), ('Other', 'Other')], max_length=200, null=True)),
('facebook', models.URLField(blank=True)),
('twitter', models.URLField(blank=True)),
('instagram', models.URLField(blank=True)),
('linkdin', models.URLField(blank=True)),
('memberbranch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchyear')),
('namebranch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CommitteApp.branchname')),
],
options={
'verbose_name_plural': 'Branch member',
'ordering': ('id',),
},
),
]
| [
"[email protected]"
] | |
cb039221da592e976304557e61902704eecbcbac | ab0315bcded75c10c591076b22ed8ff664ee76af | /fig4/8mods_round4_0919/config_scf_8mods_data_freeze_190917_sub3_1_2.py | df15d79c7bc45e5b1e3aad780dae8f8d1bab9a7e | [] | no_license | mukamel-lab/BICCN-Mouse-MOp | 389f62492986a2ffe4278ed16f59fc17dc75b767 | 8058ab8ae827c6e019fff719903b0ba5b400931d | refs/heads/master | 2021-07-06T11:14:25.401628 | 2020-09-30T04:54:27 | 2020-09-30T04:54:27 | 189,758,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | #!/usr/bin/env python3
"""An example configuration file
"""
import sys
sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev')
import os
import snmcseq_utils
# # Configs
name = 'mop_8mods_0915_k30_sub3-1-2'
outdir = '/cndd/fangming/CEMBA/data/MOp_all/results'
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_neurons_subtypes_8mods_round4/sub3-1-2'
# fixed dataset configs
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
# mods_selected = [
# 'snmcseq_gene',
# 'snatac_gene',
# 'smarter_cells',
# 'smarter_nuclei',
# '10x_cells_v2',
# '10x_cells_v3',
# '10x_nuclei_v3',
# '10x_nuclei_v3_macosko',
# ]
mods_selected = snmcseq_utils.import_single_textcol(os.path.join(DATA_DIR, 'datasets.txt'))
print(mods_selected)
features_selected = ['10x_cells_v2']
# check features
for features_modality in features_selected:
assert (features_modality in mods_selected)
# within modality
ps = {'mc': 0.9,
'atac': 0.1,
'rna': 0.7,
}
drop_npcs = {
'mc': 0,
'atac': 0,
'rna': 0,
}
# across modality
cross_mod_distance_measure = 'correlation' # cca
knn = 20
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30
resolutions = [0.1, 0.2, 0.5, 1,]
# umap
umap_neighbors = 30
min_dist = 0.5
| [
"[email protected]"
] | |
8bfe423384a181fbcaaca4b82f6299f2a9d8cac4 | b6203a8829e4387031762d7a3d9c2125f82a465e | /helloDjango/mainapp/migrations/0011_auto_20210716_1550.py | 387e9863e32ec579fb9003544d74473618a96248 | [] | no_license | Jack-liyuanjie/Django01 | db5f88560d65311987d70325c35f1783ded5ace9 | 7068dab5fe85b21d7a0f052572a68a2fe814fc21 | refs/heads/master | 2023-06-27T22:10:44.852483 | 2021-07-28T07:02:13 | 2021-07-28T07:02:13 | 390,248,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # Generated by Django 2.0.1 on 2021-07-16 07:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0010_auto_20210716_1531'),
]
operations = [
migrations.CreateModel(
name='FruitCartEntity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cnt', models.IntegerField(default=1, verbose_name='数量')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.CartEntity', verbose_name='购物车')),
],
options={
'verbose_name': '购物车详情表',
'verbose_name_plural': '购物车详情表',
'db_table': 't_fruit_cart',
},
),
migrations.AlterModelTable(
name='fruitentity',
table='t_fruit',
),
migrations.AddField(
model_name='fruitcartentity',
name='fruit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.FruitEntity', verbose_name='水果名'),
),
]
| [
"[email protected]"
] | |
16fbdc4c4b8b382fdc8963e09498fcd6e61b7633 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/demo_20201106171218.py | e40917f42694981b872f3e9e7b9a58b87321d17f | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | # a = 1
# if a==0:
# print("a=0")
# else:
# print("a!0")
# """
# x>1 (3x-5)
# -1<=x<=1 (x+2)
# x < -1 (5x+3)
# """
# x = int(input("输入您的数字:"))
# if x > 1:
# print(3*x-5)
# else:
# if x >= -1:
# print(x + 2)
# else:
# print(5*x+3)
# 猜数字游戏
# import random
# computet_num = random.randint(1,100)
# while True:
# people_num = int(input("请输入您的数字:"))
# if people_num < computet_num:
# print("大一点")
# elif people_num > computet_num:
# print("小一点")
# else:
# print("猜对了")
# break
# def fun1(a,b,c):
# print("这是参数a:",a)
# print("这是参数b:",b)
# print("这是参数c:",c)
# fun1(1,23,4)
# def fun1(a):
# # return "ac"
# print("a")
# fun1("c")
# def fun1(a,b,c,d):
# print(a,b,c,d)
# fun1(10,13,d=13,c=90)
# fun1 = lambda x: x+10
# print(fun1(5))
# def fun1(x):
# return x+10
# print(fun1(5))
# fun1 = lambda x,y: x+y
# print(fun1(10,12))
list = ["ha"]
b = {"hah"}
c = "a"
print(type(list))
print(type(b))
print(type(c)) | [
"[email protected]"
] | |
67c3ac7edd41ead61f20b9de0717ebc0397b9688 | bf683eb4a6080cf67669de90d1afdad53fccb738 | /Lib/site-packages/django/contrib/gis/geoip2/base.py | b1035ac6ff31eecbe0e9d58d3948e23d7caa0892 | [
"MIT"
] | permissive | mspgeek/Client_Portal | cd513308840aa4203554ebc1160f17f0dd4b17cf | 0267168bb90e8e9c85aecdd715972b9622b82384 | refs/heads/master | 2023-03-07T21:33:22.767108 | 2020-04-08T01:43:19 | 2020-04-08T01:43:19 | 253,946,635 | 6 | 0 | MIT | 2022-12-31T07:01:43 | 2020-04-08T00:43:07 | HTML | UTF-8 | Python | false | false | 9,033 | py | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_ipv46_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2:
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = frozenset((MODE_AUTO, MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, MODE_MEMORY))
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, str):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Check the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, str):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
try:
validate_ipv46_address(query)
except ValidationError:
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
| [
"[email protected]"
] | |
e46760ca6a1ddba11d0537d234a557acc6dd3425 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_216/ch25_2020_09_09_21_54_03_750638.py | ad2b06073c8d92f2380ee1a03f82efb712a43682 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import math
v = float(input("Velocidade"))
a = float(input("Angulo"))
d = ((v**2) * math.sin(2*a))/9.8
if d <= 98:
print("Muito perto")
else:
if d >= 102:
print("Muito longe")
else:
print("Acertou!") | [
"[email protected]"
] | |
b18bc25f8220824e4dc95d1c7070d671cc8e4d5f | e2f9d506dcc3fee7dbbbce370c7e2c3f48275828 | /poc/merge-multiple-json-file/test.py | 13f57b73202835f5b6dd25006a623ec8932c627f | [
"MIT"
] | permissive | MacHu-GWU/s3splitmerge-project | d33829f1ff6aed9cc77c9b4bec30601ce4570f60 | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | refs/heads/main | 2023-08-30T09:07:32.312453 | 2021-11-07T16:08:24 | 2021-11-07T16:08:24 | 394,803,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | # -*- coding: utf-8 -*-
import io
import time
import boto3
from boto3.s3.transfer import TransferConfig
from icecream import ic
import awswrangler as wr
from datetime import datetime
import pandas as pd
from pathlib_mate import Path
boto_ses = boto3.session.Session()
s3_client = boto_ses.client("s3")
class Config:
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
n_file = 3
n_records_per_file = 150000
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
key_prefix = "s3splitmerge/poc/merge-multiple-json-file"
def create_test_data():
n_file = 3
n_records_per_file = 150000
columns = ["id", "value"]
value = "[email protected]"
for nth_file in range(1, 1+n_file):
start_id = (nth_file - 1) * n_records_per_file + 1
end_id = start_id + n_records_per_file
df = pd.DataFrame(columns=columns)
df["id"] = range(start_id, end_id)
df["value"] = value
wr.s3.to_json(
df=df,
path=f"s3://{bucket}/{key_prefix}/{nth_file}.json",
orient="records",
lines=True,
)
def merge_files():
KB = 1024
config = TransferConfig(multipart_threshold=1)
target_key = f"{key_prefix}/data.json"
response = s3_client.create_multipart_upload(
Bucket=bucket,
Key=target_key,
)
upload_id = response["UploadId"]
n_file = 3
s3_key_lst = [
f"{key_prefix}/{nth_file}.json"
for nth_file in range(1, 1+n_file)
]
parts = list()
for part_number, s3_key in enumerate(s3_key_lst):
part_number += 1
response = s3_client.upload_part_copy(
Bucket=bucket,
Key=target_key,
CopySource={"Bucket": bucket, "Key": s3_key},
PartNumber=part_number,
UploadId=upload_id,
)
etag = response["CopyPartResult"]["ETag"]
parts.append({"ETag": etag, "PartNumber": part_number})
s3_client.complete_multipart_upload(
Bucket=bucket,
Key=target_key,
MultipartUpload={"Parts": parts},
UploadId=upload_id
)
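# S3 multipart uploads require every part except the last to be at least 5 MiB, and the
# same limit applies to upload_part_copy; the ~150k-row JSON files generated above
# appear to be sized so that each source file qualifies as its own part.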
if __name__ == "__main__":
create_test_data()
merge_files()
pass
| [
"[email protected]"
] | |
932b2be3018c7e85db9ac36d0ef3868c1c8bc902 | 89521af529f155a2352003caddd4b5edd58a57a6 | /sale_invoice_plan/models/sale.py | 5010af3a87f864d3e5aff52079aa8eef1222c4d7 | [] | no_license | ecosoft-odoo/eco-addons | bd132d326c4af150f16dda7935af23d200e1e3df | cb0ebea2cb9a26945093e2a4036a0854b6fc89b2 | refs/heads/11.0 | 2021-07-15T03:22:37.875705 | 2019-02-23T08:32:59 | 2019-02-23T08:32:59 | 168,274,323 | 0 | 5 | null | 2020-07-17T09:15:20 | 2019-01-30T03:41:11 | Python | UTF-8 | Python | false | false | 6,745 | py | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from dateutil.relativedelta import relativedelta
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.addons import decimal_precision as dp
from odoo.tools.float_utils import float_round as round
class SaleOder(models.Model):
_inherit = 'sale.order'
invoice_plan_ids = fields.One2many(
comodel_name='sale.invoice.plan',
inverse_name='sale_id',
        string='Invoice Plan',
copy=False,
)
use_invoice_plan = fields.Boolean(
string='Use Invoice Plan',
default=False,
copy=False,
)
@api.multi
def create_invoice_plan(self, num_installment, installment_date,
interval, interval_type, advance):
self.ensure_one()
self.invoice_plan_ids.unlink()
invoice_plans = []
if num_installment <= 1:
            raise UserError(_('Number of installments must be greater than 1'))
Decimal = self.env['decimal.precision']
prec = Decimal.precision_get('Product Unit of Measure')
percent = round(1.0 / num_installment * 100, prec)
percent_last = 100 - (percent * (num_installment-1))
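        # The last installment takes whatever remains after rounding so that the
        # plan's percentages always sum to exactly 100.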
# Advance
if advance:
vals = {'installment': 0, 'plan_date': installment_date,
'type': 'advance', 'percent': 0.0}
invoice_plans.append((0, 0, vals))
installment_date = self._next_date(installment_date,
interval, interval_type)
# Normal
for i in range(num_installment):
this_installment = i+1
if num_installment == this_installment:
percent = percent_last
vals = {'installment': this_installment,
'plan_date': installment_date,
'type': 'installment',
'percent': percent}
invoice_plans.append((0, 0, vals))
installment_date = self._next_date(installment_date,
interval, interval_type)
self.write({'invoice_plan_ids': invoice_plans})
return True
@api.multi
def remove_invoice_plan(self):
self.ensure_one()
self.invoice_plan_ids.unlink()
return True
@api.model
def _next_date(self, installment_date, interval, interval_type):
installment_date = fields.Date.from_string(installment_date)
if interval_type == 'month':
next_date = installment_date + relativedelta(months=+interval)
elif interval_type == 'year':
next_date = installment_date + relativedelta(years=+interval)
else:
next_date = installment_date + relativedelta(days=+interval)
next_date = fields.Date.to_string(next_date)
return next_date
@api.multi
def action_invoice_create(self, grouped=False, final=False):
inv_ids = super().action_invoice_create(grouped=grouped, final=final)
invoice_plan_id = self._context.get('invoice_plan_id')
if invoice_plan_id:
plan = self.env['sale.invoice.plan'].browse(invoice_plan_id)
invoices = self.env['account.invoice'].browse(inv_ids)
invoices.ensure_one() # Expect 1 invoice for 1 invoice plan
plan._compute_new_invoice_quantity(invoices[0])
plan.invoice_ids += invoices
return inv_ids
class SaleInvoicePlan(models.Model):
_name = 'sale.invoice.plan'
_order = 'installment'
sale_id = fields.Many2one(
comodel_name='sale.order',
string='Sales Order',
index=True,
readonly=True,
ondelete='cascade',
)
installment = fields.Integer(
string='Installment',
)
plan_date = fields.Date(
string='Plan Date',
required=True,
)
type = fields.Selection(
[('advance', 'Advance'),
('installment', 'Installment'), ],
string='Type',
required=True,
default='installment',
)
last = fields.Boolean(
string='Last Installment',
compute='_compute_last',
help="Last installment will create invoice use remaining amount",
)
percent = fields.Float(
string='Percent',
digits=dp.get_precision('Product Unit of Measure'),
help="This percent will be used to calculate new quantity"
)
invoice_ids = fields.Many2many(
'account.invoice',
relation="sale_invoice_plan_invoice_rel",
column1='plan_id', column2='invoice_id',
string='Invoices',
readonly=True,
)
to_invoice = fields.Boolean(
string='Next Invoice',
compute='_compute_to_invoice',
help="If this line is ready to create new invoice",
)
invoiced = fields.Boolean(
string='Invoice Created',
compute='_compute_invoiced',
help="If this line already invoiced",
)
    _sql_constraints = [('unique_instalment',
'UNIQUE (sale_id, installment)',
'Installment must be unique on invoice plan')]
@api.multi
def _compute_to_invoice(self):
""" If any invoice is in draft/open/paid do not allow to create inv
Only if previous to_invoice is False, it is eligible to_invoice
"""
for rec in self.sorted('installment'):
rec.to_invoice = False
if rec.sale_id.state != 'sale': # Not confirmed, no to_invoice
continue
if not rec.invoiced:
rec.to_invoice = True
break
@api.multi
def _compute_invoiced(self):
for rec in self:
invoiced = rec.invoice_ids.filtered(
lambda l: l.state in ('draft', 'open', 'paid'))
rec.invoiced = invoiced and True or False
@api.multi
def _compute_last(self):
for rec in self:
last = max(rec.sale_id.invoice_plan_ids.mapped('installment'))
rec.last = rec.installment == last
@api.multi
def _compute_new_invoice_quantity(self, invoice):
self.ensure_one()
if self.last: # For last install, let the system do the calc.
return
percent = self.percent
for line in invoice.invoice_line_ids:
            assert len(line.sale_line_ids) > 0, \
'No matched order line for invoice line'
order_line = line.sale_line_ids[0]
if order_line.is_downpayment:
line.quantity = -percent/100 # Always based on 1 unit
else:
line.quantity = order_line.product_uom_qty * (percent/100)
invoice.compute_taxes()
| [
"[email protected]"
] | |
3759d19c7821225d7aff6f1f4cd1a6780d8444bb | d7390fea6c7f712ee32be6d3478835d965d795e0 | /py26_08day/task_08day.py | db5bc4485d6ca0abe1775d07e118dc8743cc7d00 | [] | no_license | luwenchun/Automated_Test | 2f424655d80127e3ed98657869021a775beca868 | 79b9937cfc0841b0a80d4fd45d8ff467654b5b55 | refs/heads/master | 2021-02-10T15:23:08.446463 | 2020-03-26T10:39:38 | 2020-03-26T10:39:38 | 244,393,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | """
============================
Author:柠檬班-木森
Time:2019/10/7
E-mail:[email protected]
Company:湖南零檬信息技术有限公司
============================
"""
# Problem 1
def mul_table():
for i in range(1, 10):
for j in range(1, i + 1):
print('{} * {} = {:<4}'.format(i,j,i*j),end="")
print()
mul_table()
# for i in range(1, 10):
# print()
# for j in range(1, i + 1):
# print('{}*{}={} '.format(i,j,i*j), end="")
# print()
# Problem 2
def count_num():
count = 0
for a in range(1, 5):
for b in range(1, 5):
for c in range(1, 5):
if a != b and c != b and a != c:
print(a, b, c)
number = int('{}{}{}'.format(a,b,c))
print(number)
count += 1
    print('There are {} in total'.format(count))
count_num()
# Problem 3
def compute_number():
    print('Welcome to the calculator')
    a = int(input('Please enter number 1:'))
    b = int(input('Please enter number 2:'))
    print('Options: [1] add  [2] subtract  [3] multiply  [4] divide')
    num = input('Please choose:')
if num == '1':
return a + b
elif num == '2':
return a - b
elif num == '3':
return a * b
elif num == '4':
return a / b
else:
        print('No such option!')
res = compute_number()
print(res)
# Problem 4
users = [{"name": "py01", "pwd": "123"},
{"name": "py02", "pwd": "123"},
{"name": "py03", "pwd": "123"},
{"name": "py04", "pwd": "123"}]
def register():
    # registration feature
    username = input('Please enter a new account:') # enter the account name
    password1 = input('Please enter a password:') # enter the password
    password2 = input('Please confirm the password:') # confirm the password
    for user in users: # loop over all accounts to check whether the account already exists
        if username == user['name']:
            print('This account already exists') # account exists
            break
    else:
        # check whether the two passwords are the same
        if password1 != password2:
            print('Registration failed: the two passwords do not match')
        else:
            # the account does not exist and the passwords match, so add it to the account list
            users.append({'name': username, 'pwd': password2})
            print('Registration succeeded!')
register()
| [
"[email protected]"
] | |
8c810e7e7f2efde783f7790fd2f14511ddd35ac6 | 8ded89b0aff486337e17ddd710eca15b8450a015 | /first.py | 25f35fa4facb43418c0376e78efcdf8fc5547efa | [] | no_license | svetlyak40wt/moscow-python-confpp-2021 | 2f99881efce9e41f0b281bd9f16d0611025ac684 | d0b7ce93ac24d0c681697eb17703e975d15fdb27 | refs/heads/master | 2023-08-04T07:53:23.776076 | 2021-09-20T09:28:19 | 2021-09-20T09:28:19 | 406,925,502 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def load_ipython_extension(ipython):
print('Loading "first" extension')
def unload_ipython_extension(ipython):
print('Unloading "first" extension')
| [
"[email protected]"
] | |
b3603edcdd2487b22caaac500e8b836d86c87e51 | d01670aa5bddb47dc414bf01921155610e2a5070 | /leetcode/091_decodeways.py | 66c865541c4fc56aa6acd0de248dfcfb71736389 | [] | no_license | hwillmott/csfundamentals | 14c7e4253b581cef7046ca035bda038c24a52613 | 832f6a8c0deb0569d3fe0dc03e4564c2d850f067 | refs/heads/master | 2020-08-01T12:27:01.914391 | 2020-03-26T16:47:35 | 2020-03-26T16:47:35 | 73,576,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0: return 0
if len(s) == 1: return 0 if s[0] == "0" else 1
dp = [0]*(len(s)+1)
dp[0] = 1
        dp[1] = 0 if s[0] == "0" else 1
        s = "0" + s  # pad so that dp index i lines up with character s[i]
        for i in range(2, len(s)):
            one = dp[i-1] if s[i] != "0" else 0  # decode s[i] as a single digit
            two = dp[i-2] if 10 <= int(s[i-1:i+1]) <= 26 else 0  # decode s[i-1:i+1] as a pair
            dp[i] = one + two
        return dp[len(s)-1]
"[email protected]"
] | |
1b01b4d4b97db401bb32399f6d99e33daa724450 | ba90cb8a089d38de2c6d63bf65e9bf556731d5c6 | /Projeto1/aplicacaoR.py | 026fceaa757598cd22376291ab7299d7ce43933c | [] | no_license | luizasilveira/Camada_fisica | edc96288250b155b4d68156cf2a9ec638a3cb8c8 | 98c9f6c04d293abaa450c080403d3f25db9c4ef4 | refs/heads/master | 2020-07-05T21:00:33.782301 | 2019-10-31T18:50:12 | 2019-10-31T18:50:12 | 202,774,216 | 0 | 0 | null | 2019-09-05T17:33:14 | 2019-08-16T17:46:54 | Python | UTF-8 | Python | false | false | 1,966 | py | #!/usr/bin/env python3
# -- coding: utf-8 --
#####################################################
# Physical Layer of Computing
#Carareto
#17/02/2018
# Application
####################################################
print("comecou")
from enlace import *
import time
# Serial Com Port
# to find out your port, run in the terminal :
# python -m serial.tools.list_ports
#serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
serialName = "/dev/cu.usbmodem146201" # Mac (variacao de)
#serialName = "COM5" # Windows(variacao de)
print("abriu com")
def main():
    # Initializes the link ... the variable com holds all the methods and properties of the link, which works with threading
    com = enlace(serialName) # note that the constructor method receives a string (name)
    # Enables communication
com.enable()
# Log
print("-------------------------")
print("Comunicação inicializada")
print(" porta : {}".format(com.fisica.name))
print("-------------------------")
    # Receives the data
    print ("Receiving data .... ")
bufferReceived = bytearray()
while True:
rxBuffer, nRx = com.getData(1)
bufferReceived += rxBuffer
if (b"end" in bufferReceived):
break
imgSize = bufferReceived[:-3]
rxBuffer, nRx = com.getData(int(imgSize))
txLen = len(rxBuffer)
with open("teste.jpg", "wb") as img:
img.write(rxBuffer)
print ("Recebidos {} bytes ".format(txLen))
com.sendData(imgSize)
print ("Transmitido {} bytes ".format(len(imgSize)))
while(com.tx.getIsBussy()):
pass
    # Ends communication
    print("-------------------------")
    print("Communication ended")
print("-------------------------")
com.disable()
# only runs main when executed from the terminal ... if it is called from inside another module it does not run
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
60fba09736c5b2b4834cc41097c27d5db61e411f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2486/60793/268658.py | cb6f125c3651fd7219133d58cda5cf81b8bf149a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | ls = []
for test in range(0, int(input())):
ls.append(input())
if ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
elif ls == []:
print()
print()
else:
print(ls) | [
"[email protected]"
] | |
ab109fe310c0a92a3c5a6c5d7cc674974201c387 | 84a19fe0b89bb19caa1641aeadc9623c1a181767 | /arc/078/d.py | 0ff00fea6d789210dda2f0e596992697ebc2dad2 | [
"MIT"
] | permissive | wotsushi/competitive-programming | 75abae653cff744189c53ad7e6dbd2ca1a62e3a8 | 17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86 | refs/heads/master | 2021-06-10T06:42:40.846666 | 2021-05-31T10:32:51 | 2021-05-31T10:32:51 | 175,002,279 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from heapq import heappush, heappop
# Input
N = int(input())
a, b = (
zip(*(map(int, input().split()) for _ in range(N - 1))) if N - 1 else
((), ())
)
# Compute the distance from vertex 1 and from vertex N to every vertex
G = [{} for _ in range(N + 1)]
for x, y in zip(a, b):
G[x][y] = 1
G[y][x] = 1
INF = 10**10
def dijkstra(G, s):
dp = [INF for _ in range(len(G))]
q = []
heappush(q, (0, s))
while q:
c, i = heappop(q)
if dp[i] == INF:
dp[i] = c
for j, w in G[i].items():
heappush(q, (c + w, j))
return dp
dp1 = dijkstra(G, 1)
dpN = dijkstra(G, N)
# Vertices that are closer to vertex 1 than to vertex N, or whose distances to vertex 1 and vertex N are equal,
# are the vertices Fennec can paint among those on the path between vertex 1 and vertex N.
ans = (
'Fennec' if sum(dp1[i] <= dpN[i] for i in range(1, N + 1)) > N // 2 else
'Snuke'
)
# Output
print(ans)
| [
"[email protected]"
] | |
bd9820da489ae49e89dde62f758b29cba5318c2b | 7997a9581ac4badc53793e32bc85878b8e16094e | /breaklines2dxf.py | 8f168312c1ed8ca374fbae7ab17271d31999fc5e | [] | no_license | jasonfleming/pputils | 78adb832c5860a37e1473a43a5b9a54ad4d5ce55 | 2b34e47e4c3331d5780019d248e3f37c71164665 | refs/heads/master | 2022-09-29T13:40:38.657005 | 2022-08-10T19:40:06 | 2022-08-10T19:40:06 | 79,500,881 | 1 | 0 | null | 2017-01-19T22:17:09 | 2017-01-19T22:17:09 | null | UTF-8 | Python | false | false | 2,917 | py | #!/usr/bin/env python3
#
#+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!
# #
# breaklines2dxf.py #
# #
#+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!+!
#
# Author: Pat Prodanovic, Ph.D., P.Eng.
#
# Date: Sept 12, 2015
#
# Modified: Feb 20, 2016
# Made it work for python 2 and 3
#
# Purpose: Takes a pputils 3d breakline and exports it to dxf format.
# To create the 3d breakline from xyz and lines.csv, run mkbreakline.py
#
# Uses: Python 2 or 3, Numpy
#
# Example:
#
# python breaklines2dxf.py -l lines3d.csv -o lines3d.dxf
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os,sys # system parameters
import numpy as np # numpy
from dxfwrite import DXFEngine as dxf # for dxf export
from progressbar import ProgressBar, Bar, Percentage, ETA
curdir = os.getcwd()
#
# I/O
if len(sys.argv) == 5 :
dummy2 = sys.argv[1]
lines_file = sys.argv[2]
dummy3 = sys.argv[3]
output_file = sys.argv[4]
else:
print('Wrong number of Arguments, stopping now...')
print('Usage:')
print('python breaklines2dxf.py -l lines3d.csv -o lines3d.dxf')
sys.exit()
# to create the output file
drawing = dxf.drawing(output_file)
#fout = open(output_file,"w")
# use numpy to read the file
# each column in the file is a row in data read by np.loadtxt method
lines_data = np.loadtxt(lines_file, delimiter=',',skiprows=0,unpack=True)
shapeid_lns = lines_data[0,:]
x_lns = lines_data[1,:]
y_lns = lines_data[2,:]
z_lns = lines_data[3,:]
# round lines nodes to three decimals
x_lns = np.around(x_lns,decimals=3)
y_lns = np.around(y_lns,decimals=3)
z_lns = np.around(z_lns,decimals=3)
# finds out how many unique breaklines there are
n_unique_lns = np.unique(shapeid_lns)
# number of nodes in the lines file
n_lns = len(x_lns)
w = [Percentage(), Bar(), ETA()]
pbar = ProgressBar(widgets=w, maxval=n_lns).start()
# write the breaklines
poly = dxf.polyline()
for i in range(0,n_lns):
pbar.update(i+1)
if (i>0):
cur_lns_shapeid = shapeid_lns[i]
prev_lns_shapeid = shapeid_lns[i-1]
if (cur_lns_shapeid - prev_lns_shapeid < 0.001):
# create tupples for vertexes to add
v0 = (x_lns[i-1], y_lns[i-1], z_lns[i-1])
v1 = (x_lns[i], y_lns[i], z_lns[i])
poly.add_vertices( [v0, v1] )
# this is needed, as the else below is never executed
# for the last line in the lines file!
if (i == n_lns-1):
drawing.add(poly)
else:
drawing.add(poly)
poly = dxf.polyline()
############################################################################
drawing.save()
pbar.finish()
| [
"[email protected]"
] | |
2283913c20c0a5855e4b54e307d09da3b38a5e03 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2019/IsInteger.spec | a3eeb6b9762f1cd246c58bd8d2323452533ffde5 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 231 | spec | 1. If Type(_argument_) is not Number, return *false*.
1. If _argument_ is *NaN*, *+∞*, or *-∞*, return *false*.
1. If floor(abs(_argument_)) ≠ abs(_argument_), return *false*.
1. Return *true*. | [
"[email protected]"
] | |
457a65493ed4ce617c56aab78df164c4dfc8fce9 | 24c0f121340a2a2df4490ed2c40057170b131087 | /words/admin.py | be801cfd5893ebd14272dd0ea963be5109b4ba4f | [] | no_license | Rediet8abere/wordplay | 81bcb7dfde11aa6d850aa7ae27ada2953b3c81a3 | bb366a0d003d0efa91a1e5bb2ca754d3b24642ec | refs/heads/master | 2022-12-12T18:31:22.046606 | 2020-01-08T17:01:03 | 2020-01-08T17:01:03 | 224,571,086 | 0 | 0 | null | 2022-12-08T01:23:19 | 2019-11-28T04:40:43 | Python | UTF-8 | Python | false | false | 88 | py | from django.contrib import admin
from .models import Words
admin.site.register(Words)
| [
"[email protected]"
] | |
8a92c87c32c27752fd7b041c88469c7df129a667 | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /maxSideLength.py | cddba5c45c0fad94452c6e6bf97e8ce6d0442861 | [] | no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | class Solution(object):
def maxSideLength(self, mat, threshold):
res = 0
sum = [[0 for _ in range(len(mat[0]) + 1)] for _ in range(len(mat) + 1)]
for i in range(1, len(mat) + 1):
for j in range(1, len(mat[0]) + 1):
sum[i][j] = int(mat[i - 1][j - 1]) + sum[i - 1][j] + sum[i][j - 1] - sum[i - 1][j - 1]
for i in range(1, len(mat) + 1):
for j in range(1, len(mat[0]) + 1):
for k in range(1, min(len(mat) - i + 1, len(mat[0]) - j + 1)):
# large square - two rectangle + small square
tmp = sum[i + k - 1][j + k - 1] - sum[i + k - 1][j - 1] - sum[i - 1][j + k - 1] + sum[i - 1][j - 1]
print k, tmp
if tmp > threshold:
break
else:
res = max(res, k)
return res
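# Note: `sum` above is a 2D prefix-sum table, so the total of any k-by-k block whose
# top-left corner is (i, j) in 1-based coordinates follows from inclusion-exclusion:
#   block = sum[i+k-1][j+k-1] - sum[i+k-1][j-1] - sum[i-1][j+k-1] + sum[i-1][j-1]
# e.g. for mat = [[1, 1], [1, 1]] the full 2x2 block gives 4 - 0 - 0 + 0 = 4.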
test = Solution()
print test.maxSideLength([[1,1,3,2,4,3,2],[1,1,3,2,4,3,2],[1,1,3,2,4,3,2]], 4)
print test.maxSideLength([[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2],[2,2,2,2,2]], 1)
print test.maxSideLength([[1,1,1,1],[1,0,0,0],[1,0,0,0],[1,0,0,0]], 6)
print test.maxSideLength([[18,70],[61,1],[25,85],[14,40],[11,96],[97,96],[63,45]], 40184) | [
"[email protected]"
] | |
3805526ca3074737c6cb1a415f59d1e0594a8a86 | 45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0 | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetOptimizeConfigRequest.py | 19d77da4c23b5c17673b622254d3d165edf91520 | [
"Apache-2.0"
] | permissive | wanyanzhenjiang/aliyun-openapi-python-sdk | e41e9937ad3f851e5a58f6bea95663e88f7fee13 | 4a5bf1b35f2395d047ead4444ea46721976bdd24 | refs/heads/master | 2020-12-30T10:37:55.789911 | 2017-07-27T06:55:15 | 2017-07-27T06:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetOptimizeConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetOptimizeConfig')
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_Enable(self):
return self.get_query_params().get('Enable')
def set_Enable(self,Enable):
self.add_query_param('Enable',Enable) | [
"[email protected]"
] | |
089f41da2f0384caa4f7439752ccbadea77a3cf2 | e15294825647bb904a32703bc4e7b9008d094710 | /services/recognize-text/textrecognizer/service.py | eabc4901eb16c2c6ad0cbbf6fa22b332c3352ea4 | [
"MIT"
] | permissive | seekersapp2013/aleph | 44494ea7c8c02df4f74a817ae2cf901a9a13f099 | 2c4767da3c75d38d8ea39769559a06a1f29390a8 | refs/heads/master | 2020-06-14T01:35:31.328619 | 2019-07-01T15:01:59 | 2019-07-01T15:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | import grpc
import time
import logging
from threading import RLock
from concurrent import futures
from servicelayer.rpc.ocr_pb2 import Image
from servicelayer.rpc.common_pb2 import Text
from servicelayer.rpc.ocr_pb2_grpc import RecognizeTextServicer
from servicelayer.rpc.ocr_pb2_grpc import add_RecognizeTextServicer_to_server
from textrecognizer.recognize import OCR, PSM
log = logging.getLogger('service')
class OCRServicer(RecognizeTextServicer):
MODES = {
Image.PAGE: PSM.AUTO_OSD,
Image.WORD: PSM.SINGLE_WORD,
Image.CHARACTER: PSM.SINGLE_CHAR
}
def __init__(self):
self.lock = RLock()
self.ocr = OCR()
def Recognize(self, image, context):
# acquired = self.lock.acquire(blocking=False)
# if acquired is False:
# context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
# context.set_details('OCR engine is busy.')
# return Text()
try:
mode = self.MODES.get(image.mode, PSM.AUTO_OSD)
text = self.ocr.extract_text(image.data,
mode=mode,
languages=image.languages)
return Text(text=text)
except Exception as exc:
log.exception("Failed OCR.")
self.ocr.clear_engine()
context.abort(grpc.StatusCode.INTERNAL, str(exc))
# finally:
# self.lock.release()
def serve(port):
options = [('grpc.max_receive_message_length', 20 * 1024 * 1024)]
executor = futures.ThreadPoolExecutor(max_workers=4)
server = grpc.server(executor, options=options)
add_RecognizeTextServicer_to_server(OCRServicer(), server)
server.add_insecure_port(port)
server.start()
log.info("Server started: %s", port)
try:
while True:
time.sleep(84600)
except KeyboardInterrupt:
server.stop(60)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('PIL').setLevel(logging.INFO)
serve('[::]:50000')
| [
"[email protected]"
] | |
c58e47f6987edea2fd07cc9f63d9d6781e4a2217 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__numpy_1_15_4/numpy/core/tests/test_scalarmath.py | 7edf9eee8a8e442e4d1620267a1d1d9b7627cebe | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__numpy_1_15_4/numpy/core/tests/test_scalarmath.py | [
"[email protected]"
] | |
abb229e9e35ef546af8d18066a68b81643982096 | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/plat-sunos5/sunaudiodev.py | 0a3efa9bcb6cdc1e4b813c180119e8e06ce79f19 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 776 | py | # 2016.08.04 20:01:31 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-sunos5/SUNAUDIODEV.py
from warnings import warnpy3k
warnpy3k('the SUNAUDIODEV module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
ENCODING_NONE = 0
ENCODING_ULAW = 1
ENCODING_ALAW = 2
ENCODING_LINEAR = 3
MIN_GAIN = 0
MAX_GAIN = 255
LEFT_BALANCE = 0
MID_BALANCE = 32
RIGHT_BALANCE = 64
BALANCE_SHIFT = 3
PORT_A = 1
PORT_B = 2
PORT_C = 3
PORT_D = 4
SPEAKER = 1
HEADPHONE = 2
LINE_OUT = 4
MICROPHONE = 1
LINE_IN = 2
CD = 4
INTERNAL_CD_IN = CD
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-sunos5\sunaudiodev.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 20:01:31 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
e5f5f2ed9f2ca264930ccfbd5c09c43a13883401 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/h5py/_hl/datatype.py | d692af241f56461c8de827c823faa1554b264d1b | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ecdc9625758fdf18b86e1432c4f0eaf840a3df0001c5c84f83bc420c7ac598f4
size 1652
| [
"Nqk180998!"
] | Nqk180998! |
da395f369c351d53a3417bc633bb6b772db67d10 | f9273a89fa79c74efb931787dd5bf6d899730adb | /Python_file/python_westos_teacher_doc/day22/03_实现最简单的web开发.py | fbc24a7213ca41949ae3194548593f2a7405637a | [] | no_license | lianlian-YE/Mypycharm | 53148a58c6cbc07604abff801fd22b2a3212c6e7 | 004612a52300a6a99fbeedefa14ece0eeaf96556 | refs/heads/master | 2020-04-13T22:43:01.650018 | 2019-01-14T15:15:53 | 2019-01-14T15:15:53 | 163,486,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | """
Goal:
    What are the basic building blocks of a Flask application?
"""
# 1. Import the Flask class;
from flask import Flask, render_template
# 2. Instantiate the Flask class to create an application instance;
# __name__ evaluates to __main__ or to the module/package name; Flask uses it to locate the project (i.e. the project's static files and templates);
app = Flask(__name__)
# 3. Bind the view functions that handle requests to URLs through routes;
# URL: (eg:http://127.0.0.1:5000/ )
# The @app.route() decorator tells Flask which URL triggers the decorated function; this mapping is formally called a route;
# For the function hello defined below, whatever follows return is what will be displayed in the browser;
@app.route('/')
def hello():
return "<h1 style='color:red'>hello python!</h1><br/><a href='/westos/'>西部开源技术中心</a>"
@app.route('/westos/')
def westos():
    # How to return an HTML page from a Flask program: by default Flask looks for page content in the templates directory;
return render_template('westos.html')
if __name__ == "__main__":
    # 4. Run the Flask application.
    # The default port is 5000; to change it, pass the parameter port=xxx;
    # By default this web app can only be reached from a browser on the local machine; set host="0.0.0.0" to allow access from other hosts
app.run(host='0.0.0.0', port=9000)
| [
"[email protected]"
] | |
bb7edc05ec8515b7ec7187ce1fed24a80cdd19ee | 7c7c3a34b266e664cf63f710ae5aff5587672c91 | /TutorialSeries/Pandas/Outlier Detection.py | 2efd45237f8e7a3ffec7defd1e29d09ed123a69a | [] | no_license | Schnei1811/PythonScripts | 845594a886a1fecc81cf5d7c550abec325f006a3 | 89eb331357b7cea86f5b2d0b33089215b73f9481 | refs/heads/main | 2023-03-31T23:36:48.731570 | 2021-04-11T21:55:46 | 2021-04-11T21:55:46 | 356,950,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
bridge_height = {'meters':[10.26, 10.31, 10.27, 10.22, 10.23, 6212.42, 10.28, 10.25, 10.31]}
df = pd.DataFrame(bridge_height)
df['STD'] = df['meters'].rolling(2).std()  # pd.rolling_std() was removed from pandas; the rolling() accessor is the equivalent current API
print(df)
df_std = df.describe()
print(df_std)
df_std = df.describe()['meters']['std']
print(df_std)
df = df[ (df['STD'] < df_std) ]
print(df)
'''
df now keeps only the rows where df['STD'] is less than the overall df_std calculated above.
Thus, the only remaining data are the rows whose rolling standard deviation is below that ~2067.
'''
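# Concretely, the overall std here is ~2067 (dominated by the 6212.42 outlier), while the
# 2-sample rolling std around that outlier jumps to ~4386, so the filter drops the outlier
# row, the row right after it (its window still contains the outlier), and the first row
# (whose rolling std is NaN).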
df['meters'].plot()
plt.show() | [
"[email protected]"
] | |
b350d005f125d2893bb96db7385f592cd56d8246 | 74912c10f66e90195bf87fd71e9a78fa09f017ec | /execroot/syntaxnet/bazel-out/local-opt/bin/syntaxnet/text_formats_test.runfiles/org_tensorflow/tensorflow/contrib/slim/python/slim/data/data_provider.py | 21a5295b1d8d173a3f752de142703231d27b73a6 | [] | no_license | koorukuroo/821bda42e7dedbfae9d936785dd2d125- | 1f0b8f496da8380c6e811ed294dc39a357a5a8b8 | 237fcc152ff436f32b2b5a3752a4181d279b3a57 | refs/heads/master | 2020-03-17T03:39:31.972750 | 2018-05-13T14:35:24 | 2018-05-13T14:35:24 | 133,244,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | /root/.cache/bazel/_bazel_root/821bda42e7dedbfae9d936785dd2d125/external/org_tensorflow/tensorflow/contrib/slim/python/slim/data/data_provider.py | [
"k"
] | k |
4dc4c8e77c4b1dbe4795c4926c3672a5c19aeadf | c537ce53f435f1c17d5fdbfe8f97405d0fb3f4f3 | /src/apps/utils/fields.py | ae1a5e895451ee1b1590f2bdd8f4023874c796e4 | [] | no_license | HiPiH/life | dbe8892ceb9cc4aaaf6409ffb8391b4903be7fdc | 8b0cd5906bd5c508776831368896fc935c9e044b | refs/heads/master | 2021-01-10T21:33:17.478873 | 2011-12-21T05:13:55 | 2011-12-21T05:13:55 | 2,938,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.db import models
__all__ = ('BigIntegerField', )
class BigIntegerField(models.IntegerField):
empty_strings_allowed=False
def get_internal_type(self):
return "BigIntegerField"
def db_type(self):
return 'bigint' # Note this won't work with Oracle. | [
"[email protected]"
] | |
664e32a8982f250cb5211834d2f57d9b1dc032f0 | 79b0a4d3db7299814b963a8ff98732a6df4fe5f8 | /worker.py | a87844e6fa3d49823247f731fe051eba67a746a8 | [] | no_license | jattoabdul/Ranti-bot | 549b28cf84b47d52a767a84e759e1f66c2fee860 | 6673778ed610c2b331e2da7d8348a798b122cb35 | refs/heads/master | 2022-12-14T03:24:32.191750 | 2018-06-05T12:04:08 | 2018-06-05T12:04:08 | 136,028,737 | 8 | 6 | null | 2021-06-01T22:12:37 | 2018-06-04T13:24:25 | Python | UTF-8 | Python | false | false | 232 | py | from app.actions import Actions
from app.utils.slackhelper import SlackHelper
# Main function
def main():
slackhelper = SlackHelper()
actions = Actions(slackhelper)
actions.notify_channel()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6e772685c7255e17b1203cec46dc6e0be930ed40 | 315788ed9c3727acca394ad107b0a55285a7ddc4 | /listings/v5_ra9.py | d0b7aacba374b0a4b256ffe2a6c38a0b987bb53a | [] | no_license | n04hk/Python_Zusammenfassung | b118e967d5d5547ad3eb88f9570cb7c9de45d443 | 923fadb28ab4609450e532f08de41dc4bf4913d1 | refs/heads/master | 2020-04-24T20:28:43.656148 | 2019-04-28T13:52:31 | 2019-04-28T13:52:31 | 172,245,211 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | def func(m):
return '(' + m.group() + ')'
s = re.sub(r'\d+', func, '3 Stuecke kosten 250 Franken.')
print(s)
# Ausgabe: (3) Stuecke kosten (250) Franken.
| [
"[email protected]"
] | |
07e216c632b6520fb95391258bf9ab1b25475733 | 762742b3c5cb5706e93e12dbdc3f8c46fc65f0db | /Packs/ML/Scripts/DBotPredictOutOfTheBoxV2/DBotPredictOutOfTheBoxV2.py | adc01d9712e96b7678df42b5718f5d43aecb9715 | [
"MIT"
] | permissive | EmersonElectricCo/content | 018f95f7fe7de13819e093a3661587a18407e348 | 82c82bbee7d428f0b14991a88c67672e2c02f5af | refs/heads/master | 2021-06-17T04:54:22.938033 | 2021-05-06T16:39:59 | 2021-05-06T16:39:59 | 161,693,191 | 2 | 0 | MIT | 2018-12-18T15:16:49 | 2018-12-13T20:47:26 | Python | UTF-8 | Python | false | false | 4,087 | py | # pylint: disable=no-member
import demisto_ml
from CommonServerPython import *
import traceback
TARGET_PRECISION = 0.97
THRESHOLD = 0.9
OUT_OF_THE_BOX_MODEL_NAME = 'demisto_out_of_the_box_model_v2'
OUT_OF_THE_BOX_MODEL_PATH = '/ml/encrypted_model.b'
EVALUATION_PATH = '/ml/oob_evaluation.txt'
SCRIPT_MODEL_VERSION = '1.0'
OOB_VERSION_INFO_KEY = 'oob_version'
def oob_model_exists_and_updated():
res_model = demisto.executeCommand("getMLModel", {"modelName": OUT_OF_THE_BOX_MODEL_NAME})[0]
if is_error(res_model):
return False
existing_model_version = res_model['Contents']['model']['extra'].get(OOB_VERSION_INFO_KEY, -1)
return existing_model_version == SCRIPT_MODEL_VERSION
def load_oob_model():
try:
encoded_model = demisto_ml.load_oob(OUT_OF_THE_BOX_MODEL_PATH)
except Exception:
return_error(traceback.format_exc())
res = demisto.executeCommand('createMLModel', {'modelData': encoded_model.decode('utf8'),
'modelName': OUT_OF_THE_BOX_MODEL_NAME,
'modelLabels': ['Malicious', 'Non-Malicious'],
'modelOverride': 'true',
'modelType': 'torch',
'modelExtraInfo': {'threshold': THRESHOLD,
OOB_VERSION_INFO_KEY: SCRIPT_MODEL_VERSION
}
})
if is_error(res):
return_error(get_error(res))
with open(EVALUATION_PATH, 'r') as json_file:
data = json.load(json_file)
y_test = data['YTrue']
y_pred = data['YPred']
y_pred_prob = data['YPredProb']
y_pred_evaluation = [{pred: prob} for pred, prob in zip(y_pred, y_pred_prob)]
res = demisto.executeCommand('GetMLModelEvaluation', {'yTrue': json.dumps(y_test),
'yPred': json.dumps(y_pred_evaluation),
'targetPrecision': str(0.85),
'targetRecall': str(0),
'detailedOutput': 'true'
})
if is_error(res):
return_error(get_error(res))
confusion_matrix = json.loads(res[0]['Contents']['csr_matrix_at_threshold'])
confusion_matrix_no_all = {k: v for k, v in confusion_matrix.items() if k != 'All'}
confusion_matrix_no_all = {k: {sub_k: sub_v for sub_k, sub_v in v.items() if sub_k != 'All'}
for k, v in confusion_matrix_no_all.items()}
res = demisto.executeCommand('evaluateMLModel',
{'modelConfusionMatrix': confusion_matrix_no_all,
'modelName': OUT_OF_THE_BOX_MODEL_NAME,
'modelEvaluationVectors': {'Ypred': y_pred,
'Ytrue': y_test,
'YpredProb': y_pred_prob
},
'modelConfidenceThreshold': THRESHOLD,
'modelTargetPrecision': TARGET_PRECISION
})
if is_error(res):
return_error(get_error(res))
def predict_phishing_words():
if not oob_model_exists_and_updated():
load_oob_model()
dargs = demisto.args()
dargs['modelName'] = OUT_OF_THE_BOX_MODEL_NAME
res = demisto.executeCommand('DBotPredictPhishingWords', dargs)
if is_error(res):
return_error(get_error(res))
return res
def main():
res = predict_phishing_words()
return res
if __name__ in ['__main__', '__builtin__', 'builtins']:
demisto.results(main())
| [
"[email protected]"
] | |
be236401a50222b0114e1126ee9946d74187e9dd | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/db/backends/base/validation.py | a02780a6947b0d164adbcf26dfc8a43433a65b07 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 1,040 | py | class BaseDatabaseValidation:
"""Encapsulate backend-specific validation."""
def __init__(self, connection):
self.connection = connection
def check(self, **kwargs):
return []
def check_field(self, field, **kwargs):
errors = []
# Backends may implement a check_field_type() method.
if (hasattr(self, 'check_field_type') and
# Ignore any related fields.
not getattr(field, 'remote_field', None)):
# Ignore fields with unsupported features.
db_supports_all_required_features = all(
getattr(self.connection.features, feature, False)
for feature in field.model._meta.required_db_features
)
if db_supports_all_required_features:
field_type = field.db_type(self.connection)
# Ignore non-concrete fields.
if field_type is not None:
errors.extend(self.check_field_type(field, field_type))
return errors
| [
"[email protected]"
] | |
eb45a4ceaeba160509afafd51cde32f37ac7ab0c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02399/s487176195.py | 97bc7ca1c37548157f4bd77246ef2f57a6fb5da8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | x,y=map(int,input().split())
print("{0} {1} {2:.8f}".format(x//y,x%y,x/y))
| [
"[email protected]"
] | |
1ddf2f22b39151a8fd975399994f0c47d007c8ef | 5f2103b1083b088aed3f3be145d01a770465c762 | /169. Majority Element.py | ef741962e751483cad719893fc83c186e65afd3e | [] | no_license | supersj/LeetCode | 5605c9bcb5ddcaa83625de2ad9e06c3485220019 | 690adf05774a1c500d6c9160223dab7bcc38ccc1 | refs/heads/master | 2021-01-17T17:23:39.585738 | 2017-02-27T15:08:42 | 2017-02-27T15:08:42 | 65,526,089 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
#
# Credits:
# Special thanks to @ts for adding this problem and creating all test cases.
#
# Subscribe to see which companies asked this question
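# The loop below is the Boyer-Moore voting idea: keep one candidate and a counter; equal
# elements add 1, different ones subtract 1, and when the counter reaches 0 the next
# element becomes the new candidate.  Because the majority element appears more than n/2
# times, it always survives.  Trace for [3, 2, 3]:
#   3 -> candidate 3, count 1;  2 -> count 0;  3 -> candidate 3, count 1  => answer 3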
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        major = nums[0]
        count = 1
        for i in range(1, len(nums)):
            if count == 0:
                major = nums[i]  # adopt the current element as the new candidate
                count = 1
            elif major == nums[i]:
                count += 1
            else:
                count -= 1
return major
| [
"[email protected]"
] | |
fcca03f588d55858da75addbb82353a1568ff909 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2234/60632/279655.py | 5ccddbecc0421ed6a32a6f18b48171c5099a2573 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | # x为当前访问的节点,time为时间戳,n为节点总数
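# (Header comment above, translated) x is the node currently being visited, time is the
# DFS timestamp, and n is the total number of nodes.  dfn[x] is the discovery time of x,
# low[x] the smallest discovery time reachable from x's subtree; when dfn[x] == low[x],
# x closes a strongly connected component, which is popped off `stack` into `result`.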
def tarjan(x: int, time: int, n: int):
time += 1
dfn[x] = low[x] = time
stack.append(x)
for y in range(n):
if adj[x][y] == 1:
if dfn[y] == 0:
tarjan(y, time, n)
low[x] = min(low[x], low[y])
elif y in stack:
low[x] = min(low[x], low[y])
if dfn[x] == low[x]:
tmp = []
while stack[-1] != x:
tmp.append(stack.pop())
tmp.append(stack.pop())
result.append(tmp)
n = int(input()) # number of spies
p = int(input()) # number of people willing to be bribed
money = [] # amount of money needed to bribe each of them
for i in range(p):
money.append(list(map(int, input().split(' '))))
r = int(input()) # number of edges in the graph
link = [] # edges of the graph
for i in range(r):
link.append(list(map(int, input().split(' '))))
adj = [[0 for i in range(n)] for j in range(n)] # adjacency matrix
for i in link: # build the adjacency matrix
adj[i[0]-1][i[1]-1] = 1
dfn = [0 for i in range(n)]
low = [0 for i in range(n)]
stack = []
result = []
for i in range(n): # condense strongly connected components with Tarjan
if dfn[i] == 0:
tarjan(i, i, n)
print(result)
need = [] # nodes that must be bought but are not bribable, i.e. have in-degree 0
for i in range(n):
col = [adj[j][i] for j in range(n)]
if 1 not in col and i not in [j[0] for j in money]:
need.append(i)
print(need)
print([i[0] for i in money])
| [
"[email protected]"
] | |
fab46677e2e984639881af549272b656e8a58621 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/tree-20.py | 8efa7e396a5213ec09e22d51c9664f39cc8e494f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:$Type = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
# Input parameters
n:int = 100
c:int = 4
# Data
t:Tree = None
i:int = 0
k:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
d31c9d70adba120acd6845805f35c9d9fa7cf28e | 6ed6171b313cd485764d6d952b0f06c07d078f5d | /api/permissions.py | 1fdded12e31dbd62b79d671d54169e99c8374fba | [] | no_license | yezyilomo/marathon | db802724cecf0360a3e9864d70402d029a10e7b6 | 75e4fd42ffe37e9b6b1d1bf6610a8bea6b12113e | refs/heads/master | 2021-05-26T16:15:47.632053 | 2020-04-08T16:08:29 | 2020-04-08T16:08:29 | 254,133,385 | 3 | 0 | null | 2020-04-08T16:08:30 | 2020-04-08T15:54:07 | Python | UTF-8 | Python | false | false | 1,517 | py | from rest_framework import permissions
class IsAllowedUser(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
return obj == request.user
class IsCategoryOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
return obj.marathon.organizer == request.user
class IsSponsorOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
return obj.marathon.organizer == request.user
class IsMarathonOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
return obj.organizer == request.user
class IsPaymentOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
return obj.user == request.user or obj.marathon.organizer == request.user
class IsAdminUser(permissions.BasePermission):
"""
Custom permission to check if user is admin
"""
def has_permission(self, request, view):
return request.user.is_authenticated and request.user.is_admin | [
"[email protected]"
] | |
2359691b0b5c80c8263fd3b40be86eedd64a19ee | 0c70dcec22a090e70b1f20613ea6e0a64fd9a037 | /GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/io/test_gcs.py | b80320e7700f52123bfb6001760260a947bec930 | [
"MIT"
] | permissive | payiz-asj/Gis | 82c1096d830878f62c7a0d5dfb6630d4e4744764 | 3d315fed93e2ab850b836ddfd7a67f5618969d10 | refs/heads/main | 2023-06-27T15:25:17.301154 | 2021-08-03T10:02:58 | 2021-08-03T10:02:58 | 392,269,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | from io import BytesIO
import os
import numpy as np
import pytest
from pandas import DataFrame, date_range, read_csv
import pandas._testing as tm
from pandas.util import _test_decorators as td
@td.skip_if_no("gcsfs")
def test_read_csv_gcs(monkeypatch):
from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
class MockGCSFileSystem(AbstractFileSystem):
def open(*args, **kwargs):
return BytesIO(df1.to_csv(index=False).encode())
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df2 = read_csv("gs://test/test.csv", parse_dates=["dt"])
tm.assert_frame_equal(df1, df2)
@td.skip_if_no("gcsfs")
def test_to_csv_gcs(monkeypatch):
from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
s = BytesIO()
s.close = lambda: True
class MockGCSFileSystem(AbstractFileSystem):
def open(*args, **kwargs):
s.seek(0)
return s
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df1.to_csv("gs://test/test.csv", index=True)
def mock_get_filepath_or_buffer(*args, **kwargs):
return BytesIO(df1.to_csv(index=True).encode()), None, None, False
monkeypatch.setattr(
"pandas.io.common.get_filepath_or_buffer", mock_get_filepath_or_buffer
)
df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@td.skip_if_no("fastparquet")
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
class MockGCSFileSystem(AbstractFileSystem):
def open(self, path, mode="r", *args):
if "w" not in mode:
raise FileNotFoundError
return open(os.path.join(tmpdir, "test.parquet"), mode)
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df1.to_parquet(
"gs://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
with pytest.raises(ImportError) as e:
read_csv("gs://test/test.csv")
assert "gcsfs library is required" in str(e.value)
| [
"[email protected]"
] | |
e010af64e48efa09d26f1e4b7f943a02082dace2 | bbe447a740929eaee1955bd9c1517cf760dd5cb9 | /keygrabber/adwords/adwords_api_python_14.2.1/adspygoogle/adwords/zsi/v201008/AdGroupAdService_services.py | bce9883a53e9d6a36b68f760daeabf22fc68622c | [
"Apache-2.0"
] | permissive | MujaahidSalie/aranciulla | f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893 | 34197dfbdb01479f288611a0cb700e925c4e56ce | refs/heads/master | 2020-09-07T02:16:25.261598 | 2011-11-01T21:20:46 | 2011-11-01T21:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | ##################################################
# AdGroupAdService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from AdGroupAdService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class AdGroupAdServiceLocator:
AdGroupAdServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v201008/AdGroupAdService"
def getAdGroupAdServiceInterfaceAddress(self):
return AdGroupAdServiceLocator.AdGroupAdServiceInterface_address
def getAdGroupAdServiceInterface(self, url=None, **kw):
return AdGroupAdServiceSoapBindingSOAP(url or AdGroupAdServiceLocator.AdGroupAdServiceInterface_address, **kw)
# Methods
class AdGroupAdServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# get: getAdGroupAd
def getAdGroupAd(self, request):
if isinstance(request, getAdGroupAdRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getAdGroupAdResponse.typecode)
return response
# mutate: getAdGroupAd
def mutateAdGroupAd(self, request):
if isinstance(request, mutateAdGroupAdRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(mutateAdGroupAdResponse.typecode)
return response
getAdGroupAdRequest = ns0.getAdGroupAd_Dec().pyclass
getAdGroupAdResponse = ns0.getAdGroupAdResponse_Dec().pyclass
mutateAdGroupAdRequest = ns0.mutateAdGroupAd_Dec().pyclass
mutateAdGroupAdResponse = ns0.mutateAdGroupAdResponse_Dec().pyclass
| [
"[email protected]"
] | |
39319351c0c7faafc21e10a864c35e6716a3785f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/945.py | 7863ed26a1e33eacbab4fd0315c93eed52357ba7 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | #/usr/bin/python3
def solve(N):
N = list(N)
res = ""
prev = 0
while N:
act = int(N.pop(0))
#print(prev, act)
if prev <= act:
res += str(prev)
prev = act
else:
res += str(prev-1)
res += "9"*len(N)
prev = 9
break
res += str(prev)
return str(int(res))
T = int(input())
for t in range(T):
N = input()
while True:
M = solve(N)
if M == N:
break
else:
N = M
print("Case #{0}: {1}".format(t+1, int(N)))
| [
"[email protected]"
] | |
76b05533f9e6516133fbc32a4c854068b3945a09 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225511.py | 4f21199c88011b2b2d4020bf52f226457160cd94 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 578 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
        with open('./week01/homework02/top10.csv', 'a', encoding='utf-8') as article:
            article.write(output)
        return item
| [
"[email protected]"
] | |
a7fa0c2b13e99441b55229c95b761c210c29ac52 | 253089ef4ee99c50cdaa23fde4d789794789e2e9 | /134/test_twosums.py | 842a20ecb4b75611347c73b305dd9b21be332816 | [] | no_license | Zaubeerer/bitesofpy | 194b61c5be79c528cce3c14b9e2c5c4c37059259 | e5647a8a7a28a212cf822abfb3a8936763cd6b81 | refs/heads/master | 2021-01-01T15:01:21.088411 | 2020-11-08T19:56:30 | 2020-11-08T19:56:30 | 239,328,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import pytest
from random import sample, seed
from twosums import two_sums
NUMBERS = [
2202, 9326, 1034, 4180, 1932, 8118, 7365, 7738, 6220, 3440, 1538, 7994, 465,
6387, 7091, 9953, 35, 7298, 4364, 3749, 9686, 1675, 5201, 502, 366, 417,
8871, 151, 6246, 3549, 6916, 476, 8645, 3633, 7175, 8124, 9059, 3819, 5664,
3783, 3585, 7531, 4748, 353, 6819, 9117, 1639, 3046, 4857, 1981]
def test_two_sums():
"""Test of the example given in the description"""
numbers = [3, 10, 14, 8, 15, 5, 16, 13, 9, 2]
expected = (2, 6)
target = 30
result = two_sums(numbers, target)
assert result == expected
@pytest.mark.parametrize("target, expected", [
(10093, (2, 36)),
(7067, (27, 30)),
(11261, (0, 36)),
(11350, (37, 41)),
(5224, (31, 42)),
(2934785974, None),
])
def test_two_sums_param(target, expected):
result = two_sums(NUMBERS, target)
assert result == expected
def test_two_sums_random():
seed(1)
numbers = sample(range(1, 1_000_000), 1_000)
picked = sample(numbers, 2)
index1 = numbers.index(picked[0])
index2 = numbers.index(picked[1])
ordered = sorted([index1, index2])
expected = ordered[0], ordered[1]
target = sum(picked)
result = two_sums(numbers, target)
assert result == expected
def test_two_sums_none():
result = two_sums(NUMBERS, 7000)
assert result is None | [
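# A minimal sketch of an implementation consistent with the deterministic cases above
# (an assumption added for illustration -- not the actual `twosums.two_sums` under test):
# one pass with a value -> first-index map, returning the pair of indices in ascending
# order, or None when no two values add up to the target.
def _two_sums_sketch(numbers, target):
    seen = {}  # value -> index of its first occurrence
    for i, n in enumerate(numbers):
        if target - n in seen:
            return (seen[target - n], i)
        seen.setdefault(n, i)
    return None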
"[email protected]"
] | |
0eb6589094d8dfef39e0ec486bb16c31b27fc3f3 | 36e27ca74b734994fb2e5cd4e328e7b82202d8cd | /nodarb/migrations/0003_telpa.py | 1fe984dec5f7810becd4c75505ce077de3cfe3d1 | [] | no_license | svabis/vf | 5e9513f3a767a9561e2fb8bd3e37bb3c03d113dd | d83a4afd177e4f7007a9ce824ae5ed36f18654fc | refs/heads/master | 2020-05-21T21:19:59.952463 | 2018-06-04T11:11:50 | 2018-06-04T11:11:50 | 84,647,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nodarb', '0002_auto_20170311_1322'),
]
operations = [
migrations.CreateModel(
name='Telpa',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('telpa', models.CharField(max_length=5, choices=[(b'L', b'liel\xc4\x81 z\xc4\x81le'), (b'M', b'maz\xc4\x81 z\xc4\x81le'), (b'G', b'gym z\xc4\x81le'), (b'V', b'velo z\xc4\x81le'), (b'C', b'c\xc4\xab\xc5\x86u z\xc4\x81le')])),
],
options={
'db_table': 'telpa',
'verbose_name': 'Telpa',
},
),
]
| [
"[email protected]"
] | |
2157e48e1a135e8fc11033df646c00f9085d895f | 5930f323d96e7ed45c01fef63b100e1ad220f764 | /catalyst/core/callbacks/__init__.py | 14c691fa96ccc1abd7e4f3bc8f85c97e1378d05c | [
"Apache-2.0"
] | permissive | saswat0/catalyst | 8cb91c2392bccdbdd318544e6861e6fe6ac39b33 | a35297ecab8d1a6c2f00b6435ea1d6d37ec9f441 | refs/heads/master | 2023-04-05T00:43:29.124864 | 2020-06-18T05:41:33 | 2020-06-18T05:41:33 | 272,268,902 | 2 | 0 | Apache-2.0 | 2020-06-18T05:41:34 | 2020-06-14T19:24:04 | null | UTF-8 | Python | false | false | 607 | py | # flake8: noqa
from .checkpoint import CheckpointCallback, IterationCheckpointCallback
from .criterion import CriterionCallback
from .early_stop import CheckRunCallback, EarlyStoppingCallback
from .exception import ExceptionCallback
from .logging import ConsoleLogger, TensorboardLogger, VerboseLogger
from .metrics import (
MetricAggregationCallback,
MetricCallback,
MetricManagerCallback,
MultiMetricCallback,
)
from .optimizer import OptimizerCallback
from .scheduler import LRUpdater, SchedulerCallback
from .timer import TimerCallback
from .validation import ValidationManagerCallback
| [
"[email protected]"
] | |
1f25baa28cba0a1250c9712f3a1de7ccb89556b5 | d9aa4291a4978b932bef84b8d26aa4b911ca2add | /day111Flask前戏/02偏函数.py | d5009db7bda2799235778291411ea579600ab40d | [] | no_license | SelfShadows/my_git | 9a32d3713efb1b055d04c813b319eb2196fdcf53 | b10a4c838e1146b3f6ce297480840de9a8e89206 | refs/heads/master | 2020-12-15T22:33:49.273814 | 2020-02-14T16:33:46 | 2020-02-14T16:33:46 | 235,274,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | import functools
def index(a,b):
return a+b
# the original way of calling it
ret = index(3,1)
print(ret)
# partial function: helps the developer pass some arguments automatically
new_func = functools.partial(index, 55)
ret = new_func(1)
print(ret) | [
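# functools.partial can also freeze keyword arguments (an extra illustrative example,
# not in the original snippet), which helps when only a later parameter should be fixed.
new_func_kw = functools.partial(index, b=100)
print(new_func_kw(1))  # 101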
"[email protected]"
] | |
458f916ac8029a2a822f10c365aa1842b758e621 | b4241b2c9d01704d85c040825d4e7c32d8db3677 | /test.py | b598581bb8509cc4188807d9b7eb362a27f9d302 | [] | no_license | juyrjola/thermal-zones | 1d68b59daa183368559ce25c0c3c46cc7287576f | a21d025543d4f0413f43feffc7d05fba3b904cf9 | refs/heads/master | 2022-11-17T07:22:42.383610 | 2020-07-15T15:03:54 | 2020-07-15T15:03:54 | 279,899,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,221 | py | import click
import time
import os
import re
import glob
from functools import cached_property
THERMAL_PATH = '/sys/devices/virtual/thermal/'
RAPL_PATH = '/sys/devices/virtual/powercap/intel-rapl/intel-rapl:0'
POWERCAP_PATH = '/sys/devices/virtual/powercap'
def get_thermal_zone_paths():
paths = [p for p in os.listdir(THERMAL_PATH) if re.match('thermal_zone[0-9]+', p)]
return [os.path.join(THERMAL_PATH, p) for p in paths]
def read_sysfs_value(path):
f = open(path, 'r', encoding='ascii')
return f.read().strip()
def write_sysfs_value(path, val):
f = open(path, 'w', encoding='ascii')
return f.write(val.strip())
class SysfsMixin:
def read_attr(self, attr):
return read_sysfs_value(os.path.join(self.base_path, attr))
def write_attr(self, attr, val):
return write_sysfs_value(os.path.join(self.base_path, attr), val)
class Constraint(SysfsMixin):
def __init__(self, base_path, nr):
self.base_path = base_path
self.nr = nr
self._power_limit_uw = self.power_limit_uw
self._power_limit_changed = False
def restore(self):
if not self._power_limit_changed:
return
print('%s [%s]: %s -> %s' % (self.base_path, self.name, self.power_limit_uw, self._power_limit_uw))
self.set_power_limit_uw(self._power_limit_uw)
def read_attr(self, attr):
return super().read_attr('constraint_%d_%s' % (self.nr, attr))
def write_attr(self, attr, val):
return super().write_attr('constraint_%d_%s' % (self.nr, attr), val)
@property
def name(self):
return self.read_attr('name')
@property
def max_power_uw(self):
try:
out = int(self.read_attr('max_power_uw'))
except OSError:
return None
return out
@property
def max_power(self):
if self.max_power_uw is None:
return None
return self.max_power_uw / 1000000
@property
def power_limit_uw(self):
return int(self.read_attr('power_limit_uw'))
def set_power_limit_uw(self, val):
self._power_limit_changed = True
self.write_attr('power_limit_uw', str(int(val)))
@property
def power_limit(self):
return self.power_limit_uw / 1000000
@property
def time_window_us(self):
return int(self.read_attr('time_window_us'))
def __str__(self):
return self.name
class Battery(SysfsMixin):
def __init__(self):
self.base_path = '/sys/class/power_supply/BAT1'
@property
def power(self):
return int(self.read_attr('power_now')) / 1000000
class CPU:
def __init__(self, path):
self.path = path
self.nr = int(path[-1])
self.max_freq = int(self.read_cpufreq('cpuinfo_max_freq'))
self.min_freq = int(self.read_cpufreq('cpuinfo_min_freq'))
self._scaling_max_freq = self.scaling_max_freq
self._ep_pref = self.energy_performance_preference
self._scaling_gov = self.scaling_governor
def init(self):
# 'power', 'balance_power', 'balance_performance', 'performance'
self.set_energy_performance_preference('power')
# 'performance', 'powersave'
self.set_scaling_gov('powersave')
self.set_scaling_max_freq(self.max_freq)
def restore(self):
self.set_energy_performance_preference(self._ep_pref)
self.set_scaling_gov(self._scaling_gov)
@property
def energy_performance_preference(self):
return self.read_cpufreq('energy_performance_preference')
def set_energy_performance_preference(self, pref):
if self.energy_performance_preference != pref:
self.write_cpufreq('energy_performance_preference', pref)
@property
def scaling_max_freq(self):
return int(self.read_cpufreq('scaling_max_freq'))
def set_scaling_max_freq(self, freq):
if self.scaling_max_freq != freq:
self.write_cpufreq('scaling_max_freq', str(freq))
@property
def scaling_governor(self):
return self.read_cpufreq('scaling_governor')
def set_scaling_gov(self, gov):
if self.scaling_governor != gov:
self.write_cpufreq('scaling_governor', gov)
@property
def cur_freq(self):
return self.read_cpufreq('scaling_cur_freq')
def read_attr(self, attr):
return read_sysfs_value(os.path.join(self.path, attr))
def write_attr(self, attr, val):
return write_sysfs_value(os.path.join(self.path, attr), val)
def read_cpufreq(self, attr):
return self.read_attr('cpufreq/%s' % attr)
def write_cpufreq(self, attr, val):
print('[CPU%d] Setting %s to %s' % (self.nr, attr, val))
return self.write_attr('cpufreq/%s' % attr, val)
class PowerCapDevice:
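    """A single powercap zone (a directory exposing energy_uj), e.g. the RAPL
    package-0 or core domain, together with its power-limit constraints."""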
def __init__(self, path):
self.path = path
self.constraints = []
self._enabled = self.enabled
self._enabled_changed = False
self._last_energy_sample_time = None
self._find_constraints()
self.print()
def restore(self):
for c in self.constraints:
c.restore()
if not self._enabled_changed:
return
print('%s: %s -> %s' % (self.name, self.enabled, self._enabled))
self.set_enabled(self._enabled)
def read_attr(self, attr):
return read_sysfs_value(os.path.join(self.path, attr))
def write_attr(self, attr, val):
return write_sysfs_value(os.path.join(self.path, attr), val)
def _find_constraints(self):
for fn in os.listdir(self.path):
m = re.match('constraint_([0-9]+)_name', fn)
if not m:
continue
self.constraints.append(Constraint(self.path, int(m.groups()[0])))
@property
def enabled(self):
return bool(int(self.read_attr('enabled')))
def set_enabled(self, val: bool):
self._enabled_changed = True
self.write_attr('enabled', '1' if val else '0')
@property
def name(self):
return self.read_attr('name')
@property
def power(self):
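        # Average power since the previous call: delta of the cumulative
        # energy_uj counter (microjoules) over the elapsed wall-clock time.
        # The first call returns 0 because there is no earlier sample yet.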
energy_uj = int(self.read_attr('energy_uj'))
now = time.time()
if self._last_energy_sample_time is not None:
power = (energy_uj - self._last_energy_uj) / (now - self._last_energy_sample_time) / 1000000
else:
power = 0
self._last_energy_sample_time = now
self._last_energy_uj = energy_uj
return power
    def set_power_limit(self, limit_mw):
        if limit_mw is None:
            print('%s: restoring' % self.name)
            self.restore()
            return
        print('%s: limit to %.3f W' % (self.name, limit_mw / 1000))
        # Prefer the short_term constraint; fall back to the last constraint found.
        constraint = None
        for c in self.constraints:
            constraint = c
            if c.name == 'short_term':
                break
        if constraint is None:
            return
        constraint.set_power_limit_uw(limit_mw * 1000)
        print(constraint.power_limit_uw)
if not self.enabled:
self.set_enabled(True)
self.print()
def print(self):
print('%s [%s] %s' % (self.name, 'enabled' if self.enabled else 'disabled', self.path))
for c in self.constraints:
print(' %s (limit: %.3f, max: %s)' % (c.name, c.power_limit, c.max_power))
class PowerCap:
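    """Scans /sys/devices/virtual/powercap for zones that expose energy_uj and
    applies one of three power-limit tiers to the package-0 and core domains."""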
NO_LIMIT = 0
HOT_LIMIT = 1
CRITICAL_LIMIT = 2
def __init__(self, base_path=POWERCAP_PATH):
self.base_path = base_path
self.devices = []
self._scan()
def restore(self):
for d in self.devices:
d.restore()
def set_power_limit(self, name, limit):
found = False
for d in self.devices:
if d.name == name:
d.set_power_limit(limit)
found = True
if not found:
raise Exception('Unknown cap device: %s' % name)
def set_limit(self, limit):
if limit == self.NO_LIMIT:
self.restore()
elif limit == self.HOT_LIMIT:
self.set_power_limit('package-0', 16000)
self.set_power_limit('core', 6000)
elif limit == self.CRITICAL_LIMIT:
self.set_power_limit('package-0', 8000)
self.set_power_limit('core', 2000)
def _find_devices(self, path):
for fname in os.listdir(path):
if fname == 'energy_uj':
self.devices.append(PowerCapDevice(path))
continue
p = self.subpath(path, fname)
if os.path.islink(p):
continue
if os.path.isdir(p):
self._find_devices(p)
continue
def subpath(self, *paths):
return os.path.join(self.base_path, *paths)
    def _scan(self):
for p in os.listdir(self.base_path):
if not os.path.isdir(self.subpath(p)):
continue
if os.path.exists(self.subpath(p, 'enabled')):
if read_sysfs_value(self.subpath(p, 'enabled')) != '1':
print('Disabled: %s' % p)
continue
self._find_devices(self.subpath(p))
class TripPoint:
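    """One trip_point_<n>_* entry of a thermal zone; temp is reported by the
    kernel in millidegrees Celsius."""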
def __init__(self, zone, path):
self.zone = zone
self.path = path
def read_attr(self, attr):
        return read_sysfs_value(self.path + '_' + attr)
@cached_property
def temp(self):
return int(self.read_attr('temp'))
@property
def temp_c(self):
return self.temp / 1000
@cached_property
def type(self):
return self.read_attr('type')
class ThermalZone:
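    """A /sys/devices/virtual/thermal/thermal_zone* directory with its trip
    points sorted hottest-first; zones whose temperature cannot be read are
    marked invalid and ignored."""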
def __init__(self, path):
self.path = path
tp_paths = glob.glob(os.path.join(self.path, 'trip_point_*_type'))
tps = [TripPoint(self, p.replace('_type', '')) for p in tp_paths]
self.trip_points = sorted(filter(lambda x: x.temp > 0, tps), key=lambda x: x.temp, reverse=True)
try:
self.last_state = self.get_current_state()
except OSError:
self.valid = False
return
self.first_tp = self.trip_points[-1] if self.trip_points else None
        if self.first_tp:
            # Use the lowest 'hot'/'critical' trip point as the upper bound for
            # temperature scaling; fall back to the hottest trip point if the
            # zone defines none.
            hot_tps = [tp for tp in self.trip_points if tp.type in ('hot', 'critical')]
            self.hot_tp = hot_tps[-1] if hot_tps else self.trip_points[0]
        self.valid = True
def __str__(self):
return self.type
def read_attr(self, attr):
return read_sysfs_value(os.path.join(self.path, attr))
@property
def type(self):
return self.read_attr('type')
@property
def temp(self):
return int(self.read_attr('temp'))
@property
def temp_c(self):
return self.temp / 1000
def get_scaled_temp(self):
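        # Normalise the current temperature to 0.0-1.0 between the lowest trip
        # point (first_tp) and the hot/critical trip point (hot_tp).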
if not self.trip_points:
return 0.0
current = self.temp
first = self.first_tp.temp
last = self.hot_tp.temp
if current <= first:
return 0.0
if current >= last:
return 1.0
ret = (current - first) / (last - first)
return ret
def get_current_state(self):
temp = self.temp
for tp in self.trip_points:
if temp >= tp.temp:
return tp.type
return None
def state_changed(self) -> bool:
state = self.get_current_state()
if state != self.last_state:
self.last_state = state
return True
return False
class ThermalDaemon:
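    """Main control loop: polls the thermal zones, maps the worst trip-point
    state (active/passive/hot/critical) to a RAPL power-limit tier and tunes
    cpufreq/cpuidle for low power; restore() undoes all changes."""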
def __init__(self):
self.pc = PowerCap()
tzs = [ThermalZone(p) for p in get_thermal_zone_paths()]
self.thermal_zones = [t for t in tzs if t.valid]
self.battery = Battery()
self.cpus = []
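        # Note: the 'cpu?' glob only matches cpu0-cpu9, so at most the first
        # ten logical CPUs are managed.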
cpu_paths = glob.glob('/sys/devices/system/cpu/cpu?')
for p in sorted(cpu_paths):
self.cpus.append(CPU(p))
for z in self.thermal_zones:
print('%s: %.1f %s (%s)' % (z.type, z.temp / 1000, z.get_current_state(), z.path))
for tp in sorted(z.trip_points, key=lambda x: x.temp):
print(' %.1f: %s' % (tp.temp / 1000, tp.type))
self.last_state = None
self.callback_func = None
def init(self):
CPUIDLE_GOV = 'teo'
CPUIDLE_PATH = '/sys/devices/system/cpu/cpuidle/current_governor'
gov = read_sysfs_value(CPUIDLE_PATH)
if gov != CPUIDLE_GOV:
print('Setting cpuidle governor to %s' % CPUIDLE_GOV)
write_sysfs_value(CPUIDLE_PATH, CPUIDLE_GOV)
for cpu in self.cpus:
cpu.init()
self.loop_count = 0
def restore(self):
self.pc.restore()
for cpu in self.cpus:
cpu.restore()
def run(self):
self.loop_count = 0
while True:
try:
self.print_state()
self.loop()
time.sleep(2)
self.loop_count += 1
except (Exception, KeyboardInterrupt):
print('restoring')
self.restore()
raise
def set_state(self, state):
if state is None or state == 'active':
self.pc.set_limit(PowerCap.NO_LIMIT)
elif state == 'passive':
self.pc.set_limit(PowerCap.HOT_LIMIT)
else:
self.pc.set_limit(PowerCap.CRITICAL_LIMIT)
self.last_state = state
def print_state(self):
s1 = ''
s2 = ''
s3 = ''
s4 = ''
for z in self.thermal_zones:
s1 += '%-15s' % z.type
s2 += '%-15.1f' % (z.temp / 1000)
s3 += '%-15s' % z.get_current_state()
s4 += '%-15d' % (z.get_scaled_temp() * 100)
if self.loop_count % 20 == 0:
print(s1)
print(s3)
print(s2)
print(s4)
for d in self.pc.devices:
print('%s: %.02f W [%s]' % (d.name, d.power, d.path))
print('Battery: %.02f W' % self.battery.power)
def loop(self):
worst_state = None
for tz in self.thermal_zones:
state = tz.get_current_state()
if tz.state_changed():
print('%s changed to %s' % (str(tz), state))
if state is None:
continue
if worst_state is None:
worst_state = state
continue
if state == 'critical':
worst_state = state
continue
if state == 'hot' and worst_state != 'hot':
worst_state = state
continue
if state == 'passive' and worst_state == 'active':
worst_state = state
continue
if worst_state != self.last_state:
# self.pc.limit(worst_state)
print('state change to %s' % worst_state)
self.set_state(worst_state)
if self.callback_func:
self.callback_func()
def set_callback(self, callback_func):
self.callback_func = callback_func
class Plotter:
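    """Base class for the live-plotting front-ends; holds the daemon whose
    loop() the plotter drives between redraws."""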
def __init__(self, d: ThermalDaemon):
self.d = d
class MatplotPlotter(Plotter):
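    """Matplotlib/GTK front-end: two stacked axes with rolling 50-sample
    temperature and power traces, updated via FuncAnimation."""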
def init(self):
import matplotlib
matplotlib.use('GTK3Cairo')
import matplotlib.pyplot as plt
self.plt = plt
fig, axs = plt.subplots(2)
self.fig = fig
ax = axs[0]
ax.set_xlim(0, 50)
ax.set_ylim(0, 120)
for tz in self.d.thermal_zones:
if tz.type in ('acpitz', 'INT3400 Thermal', 'x86_pkg_temp'):
tz.line = None
continue
ydata = [tz.temp_c] * 50
tz.line, = ax.plot(ydata, label=tz.type)
ax.legend()
ax = axs[1]
ax.set_xlim(0, 50)
ax.set_ylim(0, 20)
bat = self.d.battery
bat.line, = ax.plot([bat.power] * 50, label='Battery')
for dev in self.d.pc.devices:
dev.line, = ax.plot([dev.power] * 50, label=dev.name)
ax.legend()
plt.show(block=False)
plt.pause(0.5)
def update(self, frame=None):
self.d.loop()
lines = []
for tz in self.d.thermal_zones:
if not tz.line:
continue
data = list(tz.line.get_ydata())[1:]
data.append(tz.temp_c)
tz.line.set_ydata(data)
lines.append(tz.line)
bat = self.d.battery
data = list(bat.line.get_ydata())[1:]
data.append(bat.power)
bat.line.set_ydata(data)
lines.append(bat.line)
for dev in self.d.pc.devices:
data = list(dev.line.get_ydata())[1:]
data.append(dev.power)
dev.line.set_ydata(data)
lines.append(dev.line)
return lines
def run(self):
from matplotlib.animation import FuncAnimation
ani = FuncAnimation(self.fig, self.update, frames=None, blit=True, interval=500) # noqa
print('showing')
self.plt.show()
class QTGraphPlotter(Plotter):
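    """pyqtgraph front-end with the same rolling traces; the daemon loop is
    driven from a Qt single-shot timer every 2 seconds."""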
def init(self):
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import signal
import seaborn as sns
QtGui.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
QtGui.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
self.app = app = QtGui.QApplication([])
app.setStyle("fusion")
self.win = win = pg.GraphicsLayoutWidget(show=True, title='Thermal')
win.resize(1000, 600)
win.setWindowTitle('Thermal Plot')
palette = sns.color_palette('deep')
pg.setConfigOptions(antialias=True)
p = self.temp_plot = win.addPlot(title='Temps')
p.setLabel('left', '°C')
p.addLegend()
p.setMouseEnabled(x=False, y=False)
ci = 0
for tz in self.d.thermal_zones:
if tz.type in ('acpitz', 'INT3400 Thermal', 'x86_pkg_temp'):
tz.line = None
continue
color = [x * 255 for x in palette[ci]]
tz.line = p.plot(pen=pg.mkPen(color, width=4), name=tz.type)
ci += 1
p.setYRange(0, 120, padding=0)
p.setXRange(0, 50, padding=0)
p.enableAutoRange('xy', False)
win.nextRow()
p = self.power_plot = win.addPlot(title='Power')
p.setLabel('left', 'W')
p.addLegend()
p.setMouseEnabled(x=False, y=False)
p.setYRange(0, 30, padding=0)
p.setXRange(0, 50, padding=0)
p.enableAutoRange('xy', False)
ci = 0
bat = self.d.battery
color = [x * 255 for x in palette[ci]]
bat.line = p.plot(pen=pg.mkPen(color, width=4), name='Battery')
ci += 1
for dev in self.d.pc.devices:
color = [x * 255 for x in palette[ci]]
dev.line = p.plot(pen=pg.mkPen(color, width=4), name=dev.name)
ci += 1
signal.signal(signal.SIGINT, self.sigint_handler)
def sigint_handler(self, signum, frame):
self.app.quit()
def update_line(self, line, sample):
x, y = line.getData()
if y is None:
y = []
else:
y = list(y)
y.append(sample)
if len(y) > 50:
y = y[1:]
line.setData(y)
def _update(self):
self.d.loop()
for tz in self.d.thermal_zones:
if not tz.line:
continue
self.update_line(tz.line, tz.temp_c)
bat = self.d.battery
self.update_line(bat.line, bat.power)
for dev in self.d.pc.devices:
self.update_line(dev.line, dev.power)
def update(self):
try:
self._update()
except Exception:
print('quitting')
self.app.quit()
raise
def safe_timer(self):
from pyqtgraph.Qt import QtCore
self.update()
QtCore.QTimer.singleShot(2000, self.safe_timer)
def run(self):
self.safe_timer()
self.app.exec_()
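# CLI entry point. The sysfs writes require root privileges; --plot opens a
# live pyqtgraph window (needs pyqtgraph and seaborn installed).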
@click.command()
@click.option('--plot', is_flag=True)
def run(plot):
d = ThermalDaemon()
if plot:
plotter = QTGraphPlotter(d)
plotter.init()
d.init()
if plot:
try:
plotter.run()
finally:
print('restoring')
d.restore()
else:
d.run()
if __name__ == "__main__":
run()
| [
"[email protected]"
] |