blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-616) | content_id (string, len 40) | detected_licenses (sequence, len 0-112) | license_type (2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (777 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, nullable) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], nullable) | gha_created_at (timestamp[us], nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-10.2M) | extension (188 classes) | content (string, len 3-10.2M) | authors (sequence, len 1) | author_id (string, len 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4be8a9f91ad8fbc6da7e187636382f69d3e9014f | 3fe272eea1c91cc5719704265eab49534176ff0d | /scripts/field/jett_tuto_7_1.py | 14fea3977f59741c0b2fb4a9b3c317feb3510588 | [
"MIT"
] | permissive | Bratah123/v203.4 | e72be4843828def05592298df44b081515b7ca68 | 9cd3f31fb2ef251de2c5968c75aeebae9c66d37a | refs/heads/master | 2023-02-15T06:15:51.770849 | 2021-01-06T05:45:59 | 2021-01-06T05:45:59 | 316,366,462 | 1 | 0 | MIT | 2020-12-18T17:01:25 | 2020-11-27T00:50:26 | Java | UTF-8 | Python | false | false | 805 | py | # Created by MechAviv
# Map ID :: 620100027
# Spaceship : Spaceship Cockpit
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(1)
sm.sendDelay(30)
sm.forcedInput(0)
sm.sendDelay(1000)
sm.showEffect("Effect/DirectionNewPirate.img/newPirate/balloonMsg1/20", 2000, 0, -100, -2, -2, False, 0)
sm.sendDelay(2000)
sm.spawnMob(9420567, -378, -120, False)
sm.setSpeakerID(9270085)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("You there! Get away from those controls, and drop that key!")
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.chatScript("Eliminate the Key Keeper and find the Master Key.")
sm.startQuestNoCheck(5672)
sm.createQuestWithQRValue(5672, "001") | [
"[email protected]"
] | |
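The header row above is a Hugging Face `datasets`-style schema for a source-code corpus: one file per row, with repository metadata, license fields, and the file's text in `content`. A minimal sketch of streaming such rows -- the dataset's actual name is not given here, so the "data/*.parquet" path is a hypothetical local export:

from datasets import load_dataset

# Stream a few rows with the schema above; the data_files path is an assumption.
ds = load_dataset("parquet", data_files="data/*.parquet", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])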
68f9c64f9fdba88df8225851e68798225af2346d | 63acfadae1b26e521169191ae441cfdb86817651 | /tests/argparse/special/modules/defaults/__init__.py | 3505b0795f024d936847202220a08964b18c6216 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | da-h/miniflask | 4e1e8889623665309b7194948222635275acea34 | e8398bcf2b81e1d1cd8d53d9f4b1125c027552b1 | refs/heads/master | 2023-05-10T19:28:59.669718 | 2023-05-05T12:54:05 | 2023-05-05T12:54:05 | 240,955,157 | 7 | 1 | MIT | 2023-02-28T14:44:33 | 2020-02-16T19:47:49 | Python | UTF-8 | Python | false | false | 573 | py |
def printVal(state, name):
val = state[name]
print("%s:" % state.fuzzy_names[name], val)
def print_all(state):
printVal(state, "var_default")
printVal(state, "var_default_override")
printVal(state, "var_default_override_twice")
printVal(state, "var_default_override_twice_and_cli")
def register(mf):
mf.register_defaults({
"var_default": 1,
"var_default_override": 2,
"var_default_override_twice": 3,
"var_default_override_twice_and_cli": 4
})
mf.register_event('print_all', print_all, unique=False)
| [
"[email protected]"
] | |
bda4bdd71bd5d2eeec3b1942611e2b982c0b36bc | fe3ecb9b1ddd8de17b8cc93209134f86cd9c4a6f | /6_Tensorflow/chap04_Classification/lecture_1x/step05_softmax_classifier.py | 848aca28e11509d6d891b478e7025c2ba6458666 | [] | no_license | nsh92/Bigdata-and-Machine-Learning-Education-at-ITWILL | d1a7292ee4865a3d0c664dd6ecf3afc0d6325847 | 3cb5661001597499178a2c85f4ccf70dcf0855d6 | refs/heads/master | 2022-11-21T23:10:51.421708 | 2020-07-23T12:49:11 | 2020-07-23T12:49:11 | 275,540,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | py | # -*- coding: utf-8 -*-
"""
step05_softmax_classifier.py
- activation function : Softmax(model)
- loss function : Cross Entropy
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
from sklearn.metrics import accuracy_score
# 1. x, y input data
# [fur, wings]
x_data = np.array(
[[0, 0], [1, 0], [1, 1], [0, 0], [0, 1], [1, 1]]) # [6, 2]
# [other, mammal, bird] : [6, 3] -> one-hot encoding, written with 1s and 0s
y_data = np.array([
[1, 0, 0], # other[0]
[0, 1, 0], # mammal[1]
[0, 0, 1], # bird[2]
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]
])
# 2. define the X, Y placeholders
X = tf.placeholder(dtype=tf.float32, shape=[None, 2]) # [observations, inputs]
Y = tf.placeholder(dtype=tf.float32, shape=[None, 3]) # [observations, outputs]
# 3. define the w, b variables : random initial values
w = tf.Variable(tf.random_normal([2,3])) # [inputs, outputs]
b = tf.Variable(tf.random_normal([3])) # [outputs]
# 4. softmax classifier
# 1) regression equation : predictions
model = tf.matmul(X, w) + b # regression model
# softmax(predictions)
softmax = tf.nn.softmax(model)
# 2) loss function : using cross entropy : -sum(Y * log(model))
loss = -tf.reduce_mean(Y * tf.log(softmax) + (1 - Y) * tf.log(1 - softmax))
# 3) optimizer : minimize the loss (update w, b)
train = tf.train.AdamOptimizer(0.1).minimize(loss) # minimize loss
# 4) argmax() : one-hot encoding -> decoded (integer) class index
y_pred = tf.argmax(softmax, axis = 1)
y_true = tf.argmax(Y, axis = 1)
# 5. train the model
with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) # initialize w, b
feed_data = {X : x_data, Y : y_data}
# repeat training : 500 iterations
for step in range(500):
_, loss_val = sess.run([train, loss], feed_dict=feed_data)
if (step+1) % 50 == 0:
print("step = {}, loss = {}".format(step+1, loss_val))
# model result
# y_data must be decoded to class indices to tell which class each result is (or use a y1, y2, y3 style instead)
y_pred_re = sess.run(y_pred, feed_dict = {X : x_data}) # predictions
y_true_re = sess.run(y_true, feed_dict = {Y : y_data}) # ground truth
print("y pred =", y_pred_re)
print("y true =", y_true_re)
acc = accuracy_score(y_true_re, y_pred_re)
print("분류정확도 =", acc)
'''
step = 50, loss = 0.08309727162122726
step = 100, loss = 0.02883036620914936
step = 150, loss = 0.016369767487049103
step = 200, loss = 0.01092542801052332
step = 250, loss = 0.007947643287479877
step = 300, loss = 0.0061068180948495865
step = 350, loss = 0.004874517675489187
step = 400, loss = 0.004001634661108255
step = 450, loss = 0.003356706351041794
step = 500, loss = 0.0028643012046813965
y pred = [0 1 2 0 0 2]
y true = [0 1 2 0 0 2]
classification accuracy = 1.0
'''
| [
"[email protected]"
] | |
32b5982fddad50b11c0b227150e2672caa73c81f | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_9466.py | 1ba8719a21b95b29fa4d86d410b7cf87c9d36a21 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | # In [2]: tuple(a[:-1] for a in x)
| [
"[email protected]"
] | |
3da9a6b0aae255dd6bffb0296b6c0b05017d03a9 | 7bf51d5550195a7fae26bd626ed82c5f2b5d9737 | /my_graphs/g_graph_12.py | bbfbc1c98ccd7f78677de75c49227f306cf19173 | [] | no_license | venkatram64/python_ml | 9fcadc380c2a56856f590b40a7c3b015c19c62a0 | b0b40fec4547f3675a500af24878cddacc4c270c | refs/heads/master | 2020-04-08T05:07:28.993601 | 2018-12-26T08:30:28 | 2018-12-26T08:30:28 | 159,047,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
graph_data = open('chart_file.txt', 'r').read()
lines = graph_data.split('\n')
xs = []
ys = []
for line in lines:
if len(line) > 1:
x, y = line.split(',')
xs.append(x)
ys.append(y)
ax1.clear()
ax1.plot(xs, ys)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show() | [
"[email protected]"
] | |
4d8c2d81ee6ea261b59ddb4eb91cb66024778ed6 | ce607df215bd0569a966033092ae3d24f48af714 | /docs/guide/code/awesome-bot-7/awesome/plugins/usage.py | 825a57f9849268c8956a25255a3e924fee1d6840 | [
"MIT"
] | permissive | nonebot/nonebot | fbf926e77329fc129dd51a31cfcb62a8f4aab578 | 4b49289759af0ef774d28fd0ffaed7e1a9e35fbf | refs/heads/master | 2023-08-16T00:01:06.288649 | 2023-06-01T08:44:04 | 2023-06-01T08:44:04 | 75,402,138 | 1,521 | 268 | MIT | 2022-08-17T06:07:19 | 2016-12-02T14:23:43 | Python | UTF-8 | Python | false | false | 714 | py | import nonebot
from nonebot import on_command, CommandSession
@on_command('usage', aliases=['使用帮助', '帮助', '使用方法'])
async def _(session: CommandSession):
# get the list of plugins that have a name set
plugins = list(filter(lambda p: p.name, nonebot.get_loaded_plugins()))
arg = session.current_arg_text.strip().lower()
if not arg:
# if the user sent no argument, send the feature list
await session.send(
'我现在支持的功能有:\n\n' + '\n'.join(p.name for p in plugins))
return
# if an argument was given, send the usage help for the matching command
for p in plugins:
if p.name.lower() == arg:
await session.send(p.usage)
| [
"[email protected]"
] | |
fb72e7fda094f7a1f4ccc7ffd78655c181bfa750 | 40d404e424489bb48c3673dee6664de64dbab39c | /myWebsite/.venv/lib/python3.8/site-packages/zope/site/next.py | 38c06ddc842113ca7eb79f312ea3f6a6a5c3b223 | [] | no_license | alyasamba/me | 07c9f5f27aa16f768e0432780ac8f6f5ab6afbd1 | 978053c867181bad8eb316a0920ba290a7b1ceae | refs/heads/main | 2023-01-28T09:57:46.616285 | 2020-12-02T02:31:25 | 2020-12-02T02:31:25 | 315,935,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # BBB. Remove in Version 5.0
from zope.deprecation import moved
moved('zope.component', 'Version 5.0')
| [
"[email protected]"
] | |
8c36bd998975fdb247485f1edf6cec1b02d2fe58 | f5d4863b6a62ef19ffc98e4f94f6ade1bc8810d3 | /Linked List/92_Reverse_Linked_List_II.py | 05f389e0d67f52c991bda6256be9df9eda403617 | [] | no_license | xiaomojie/LeetCode | 138808eb83938f9bd3c2e8a755d908509dff0fd3 | eedf73b5f167025a97f0905d3718b6eab2ee3e09 | refs/heads/master | 2021-06-12T09:26:01.257348 | 2019-10-23T10:41:06 | 2019-10-23T10:41:06 | 76,184,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | """
Reverse a linked list from position m to n. Do it in one-pass.
Note: 1 ≤ m ≤ n ≤ length of list.
Example:
Input: 1->2->3->4->5->NULL, m = 2, n = 4
Output: 1->4->3->2->5->NULL
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if not head:
return head
pre = dummy = ListNode(0)
dummy.next = head
for i in range(m-1):
pre = pre.next
start = pre.next
then = start.next
for i in range(m, n):
start.next = then.next
then.next = pre.next
pre.next = then
then = start.next
return dummy.next
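# Quick check (illustrative, not part of the original file):
# build 1->2->3->4->5 and reverse positions 2..4 -> expect 1->4->3->2->5
# head = ListNode(1); cur = head
# for v in (2, 3, 4, 5):
#     cur.next = ListNode(v); cur = cur.next
# head = Solution().reverseBetween(head, 2, 4)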
| [
"[email protected]"
] | |
36e45f93482d35cb5c9a7c17abd4badb5d472b96 | 32257983a6aa9b6f719ce8835e789c94df8b9346 | /manager/migrations/0001_initial.py | 57184eafc16e89981afeb99c3c784f17cc78d511 | [] | no_license | alireza-shirmohammadi/MySite | 2b165260460ea2a74769b0ceb81e520b26307a64 | 4537069f0ba50ac46f265d0157195f91bac5d853 | refs/heads/master | 2023-07-26T13:50:52.249782 | 2021-01-30T10:55:06 | 2021-01-30T10:55:06 | 303,093,825 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Generated by Django 2.2 on 2020-10-18 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Manager',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('utext', models.TextField()),
('email', models.TextField(default='')),
],
),
]
| [
"[email protected]"
] | |
9d3aee1939e7056e99df2e8fa0513615f8dad64d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/mllgad001/question1.py | f6592b8f10857fa9939c268f718db83ab9bf37c1 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | # program to simulate a simple Bulletin Board System
# Gadija Moollagie
# 14 April 2014
def displayName(): # defines the menu that will be displayed every time
print("Welcome to UCT BBS")
print("MENU")
print("(E)nter a message")
print("(V)iew message")
print("(L)ist files")
print("(D)isplay file")
print("e(X)it")
def main():
while True: # continues through the loop body as long as expression remains true
displayName() # displays this through every iteration
selection = input("Enter your selection:\n") # displays this through every iteration
selection = selection.upper()
if selection == 'E':
message = input("Enter the message:\n")
elif selection == 'V':
try:
print("The message is:", message)
except NameError:
print("The message is: no message yet")
# exception raised if there is a NameError and variable cannot be found
elif selection == 'L':
print("List of files: 42.txt, 1015.txt")
elif selection == 'D':
fileName = input("Enter the filename:\n")
if fileName == '42.txt':
print("The meaning of life is blah blah blah ...")
elif fileName == '1015.txt':
print("Computer Science class notes ... simplified")
print("Do all work")
print("Pass course")
print("Be happy")
else: # if fileName is not in list
print("File not found")
elif selection == 'X':
print("Goodbye!")
break # breaks out of loop
main() | [
"[email protected]"
] | |
71987f9861827fdaa4915ab742137953ec85ddef | 5baf3cb8b08dcea2d53d2ef022e5c6d4b2468494 | /test/test_io_k8s_api_apps_v1_deployment_strategy.py | f56bb807af8c59d37eff53eb5135e7e72f41d20d | [] | no_license | atengler/swagger-kqueen-python | a4fc0de38378a08c6c2e0c339032ed4ad63f09f5 | 01225c74a743636483211f0274f772193517ffaf | refs/heads/master | 2021-08-07T18:16:28.453730 | 2017-11-08T17:24:53 | 2017-11-08T17:29:03 | 110,007,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | # coding: utf-8
"""
Kubernetes Queen API
A simple API to interact with Kubernetes clusters
OpenAPI spec version: 0.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.io_k8s_api_apps_v1_deployment_strategy import IoK8sApiAppsV1DeploymentStrategy
class TestIoK8sApiAppsV1DeploymentStrategy(unittest.TestCase):
""" IoK8sApiAppsV1DeploymentStrategy unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiAppsV1DeploymentStrategy(self):
"""
Test IoK8sApiAppsV1DeploymentStrategy
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.io_k8s_api_apps_v1_deployment_strategy.IoK8sApiAppsV1DeploymentStrategy()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5c6074b51caf0a45af3c5c46d88fbaaddb811393 | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /search2/lagou_query_range.py | ccacf67d6ac1eb4513901b1aca0d24fbce30db0d | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | # coding=utf-8
import datetime
from whoosh.index import open_dir
from whoosh.qparser import MultifieldParser
idx_dir = 'lagou_idx'
ix = open_dir(idx_dir)
searcher = ix.searcher()
parser = MultifieldParser(["name", "com_name"], schema=ix.schema)
# Multi-field parser over the "name" and "com_name" fields.
k = u'自然 语言 自然语言 处理 salary_from:[1 TO 5000] salary_to:[ TO 5000] city:上海'
q = parser.parse(k)
today = datetime.datetime.now()
date_to = today
date_from = today + datetime.timedelta(days=-7)
print date_to.strftime('%Y%m%d')
print date_from.strftime('%Y%m%d')
results = searcher.search_page(q, 1, pagelen=30)
print(u'{0} results found for keyword {1}, {2} returned: '.format(len(results), k, results.scored_length()))
for hit in results[:50]:
print(hit['id'])
print(hit['name'])
print(hit['salary_from'], hit['salary_to'])
print(hit['date'])
print('************')
| [
"[email protected]"
] | |
840e123871abe4d70ae652e6987bb7fc4d6070e5 | f6078890ba792d5734d289d7a0b1d429d945a03a | /hw1/submissions/reyesortegacynthia/reyesortegacynthia_35890_1251163_homework 2.py | cb61aa6481078c83a83132baf98896513680dbad | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import matplotlib from matplotlib
import numpy as np

V_1 = (1, 1)
V_2 = (3, 1)
V_3 = (4, 2)
V_4 = (3.5, 5)
V_5 = (2, 4)

def area_of_polygon(vertices):
    # get the x and y of each vertex, then apply the shoelace formula:
    # half the absolute sum of x_i*y_(i-1) - y_i*x_(i-1) over all vertices
    x = np.array([v[0] for v in vertices])
    y = np.array([v[1] for v in vertices])
    return 0.5 * abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

A = area_of_polygon([V_1, V_2, V_3, V_4, V_5])
| [
"[email protected]"
] | |
8ec98c5c953c24f01a12563fcd3198185f008a6e | 56014da6ebc817dcb3b7a136df8b11cf9f976d93 | /Django天天生鲜项目/14.1-注册基本逻辑.py | 939067bb95629536cb0337f2ca1fc38b2e11d8ce | [] | no_license | sunday2146/notes-python | 52b2441c981c1106e70a94b999e986999334239a | e19d2aee1aa9433598ac3c0a2a73b0c1e8fa6dc2 | refs/heads/master | 2022-01-12T22:55:45.401326 | 2019-01-18T03:18:26 | 2019-01-18T03:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | """
1. Add the static files to the static folder ---- already produced by the front end
2. Put the register.html file into the templates folder
3. views.py
def register(request):
""显示注册页面""
return render(request,'register.html')
4. Configure the url
from user import views
urlpatterns = [
url(r'^register$',views.register,name = 'register'), # register
5. If the full page fails to display, you must add the following to register.html
{% load staticfiles %}
and fix the static file directories
{% load staticfiles %}
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
<title>天天生鲜-注册</title> # modified here
<link rel="stylesheet" type="text/css" href="{% static 'css/reset.css' %} ">
<link rel="stylesheet" type="text/css" href="{% static 'css/main.css' %}">
<script type="text/javascript" src="{% static 'js/jquery-1.12.4.min.js'%}"></script>
<script type="text/javascript" src="{% static 'js/register.js'%}"></script>
</head>
<body>
<div class="register_con">
<div class="l_con fl"> ##修改处
<a class="reg_logo"><img src="{% static 'images/logo02.png'%}"></a>
6.
<form method = "post" action = "user/register_handle">
{%csrf_token%}
""" | [
"[email protected]"
] | |
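The notes above stop at the form markup; here is a minimal sketch (not from the notes) of the register_handle view that the step-6 form posts to -- the field names are assumptions for illustration:

from django.shortcuts import render, redirect

def register_handle(request):
    """Handle the registration POST (hypothetical field names)."""
    username = request.POST.get('user_name')
    password = request.POST.get('pwd')
    if not all([username, password]):
        return render(request, 'register.html', {'errmsg': 'Incomplete data'})
    # ...create the user here, then redirect to the index page
    return redirect('/index')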
cb398291bbb438e6bf0220616db35cda393e4eb2 | 254ef44b90485767a3aea8cbe77dc6bf77dddaeb | /589N叉树的前序遍历.py | 767e3339554f17bf5d43e7c275cfe267c364da5a | [] | no_license | XinZhaoFu/leetcode_moyu | fae00d52a52c090901021717df87b78d78192bdb | e80489923c60ed716d54c1bdeaaf52133d4e1209 | refs/heads/main | 2023-06-19T02:50:05.256149 | 2021-07-09T00:50:41 | 2021-07-09T00:50:41 | 331,243,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | """
Given an N-ary tree, return the preorder traversal of its nodes' values.
For example, given a 3-ary tree:
return its preorder traversal: [1,3,5,6,2,4].
"""
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution(object):
def preorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
self.res = []
def dfs(node):
if not node:
return 0
self.res.append(node.val)
for child in node.children:
dfs(child)
dfs(root)
return self.res
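# Illustrative check (not in the original file), using the commented-out Node above:
# root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
# Solution().preorder(root)  # -> [1, 3, 5, 6, 2, 4]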
| [
"[email protected]"
] | |
f597844ce432d96d2774092744d6b634d89b6c85 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/terraform/checks/resource/aws/test_CloudfrontDistributionDefaultRoot.py | d8d77e5bbb716f2cb97de4f4c46442fb83f1cd30 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,337 | py | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.aws.CloudfrontDistributionDefaultRoot import check
from checkov.terraform.runner import Runner
class TestCloudfrontDistributionDefaultRoot(unittest.TestCase):
def test(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/example_CloudfrontDistributionDefaultRoot"
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
passing_resources = {
"aws_cloudfront_distribution.pass",
}
failing_resources = {
"aws_cloudfront_distribution.fail",
}
passed_check_resources = set([c.resource for c in report.passed_checks])
failed_check_resources = set([c.resource for c in report.failed_checks])
self.assertEqual(summary["passed"], 1)
self.assertEqual(summary["failed"], 1)
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
self.assertEqual(passing_resources, passed_check_resources)
self.assertEqual(failing_resources, failed_check_resources)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
16c4945708dd2b7588e59f769cc9178b68bbeade | f016dd6fd77bb2b135636f904748dbbab117d78b | /L07/视频笔记/4.4继承.py | c73e1dd32f1d67db92adfd7ce88c54ac9f2b604e | [
"Apache-2.0"
] | permissive | w7374520/Coursepy | b3eddfbeeb475ce213b6f627d24547a1d36909d8 | ac13f8c87b4c503135da51ad84c35c745345df20 | refs/heads/master | 2020-04-26T23:57:42.882813 | 2018-05-24T07:54:13 | 2018-05-24T07:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#Author:xp
#blog_url: http://blog.csdn.net/wuxingpu5/article/details/71209731
class Parent:
def foo(self):
print('from Parent.foo')
self.bar()
def bar(self):
print('From parent.bar')
class Sub(Parent):
def __init__(self):
pass
#self.bar=123 would raise an error here, showing the object's own bar is used first
def bar(self):
print('From Sub.bar')
s=Sub()
s.foo()
'''
Execution result:
from Parent.foo
From Sub.bar
'''
# isinstance checks: both results are True
print(isinstance(s,Sub))
print(isinstance(s,Parent))
# Inheritance models an "is-a" relationship
#--------------------------------------------------------------
# Composition also solves code duplication, but it models a "has-a" relationship
class Date:
def __init__(self,year,mon,day):
self.year=year
self.mon=mon
self.day=day
def tell(self):
print('%s-%s-%s'%(self.year,self.mon,self.day))
class People:
def __init__(self,name,age,sex):
self.name=name
self.age=age
self.sex=sex
class Teacher(People):
def __init__(self,name,age,sex,salary,year,mon,day):
People.__init__(self,name,age,sex)
self.salary=salary
self.birth=Date(year,mon,day) # class composition: a teacher has a birthday
class Student(People):
def __init__(self,name,age,sex,year,mon,day):
People.__init__(self,name,age,sex)
self.birth=Date(year,mon,day)
t=Teacher('eg',18,'male',3000,1954,12,21)
t.birth.tell() | [
"[email protected]"
] | |
e0a0021b739a598d5e7da40233741d987a92d645 | 1040b320168c49e3fd784d93ff30923527582d26 | /calm/dsl/api/quotas.py | 172ca96fefdcfea68cac18e809db25579b1930c6 | [
"Apache-2.0"
] | permissive | nutanix/calm-dsl | 87eb8a82f202ec0c71b5c8d8fe49db29bdcf2cfc | 56c52702cec4370f551785508d284e5cbe1a744a | refs/heads/master | 2023-08-31T16:43:51.009235 | 2023-08-28T05:20:41 | 2023-08-28T05:20:41 | 227,190,868 | 41 | 59 | Apache-2.0 | 2023-08-28T05:20:43 | 2019-12-10T18:38:58 | Python | UTF-8 | Python | false | false | 1,363 | py | from .resource import ResourceAPI
from .connection import REQUEST
class QuotasAPI(ResourceAPI):
def __init__(self, connection):
super().__init__(connection, resource_type="quotas", calm_api=True)
self.CREATE = self.PREFIX
self.UPDATE_STATE = self.PREFIX + "/update/state"
self.LIST = self.PREFIX + "/list"
self.UPDATE = self.PREFIX + "/{}"
def update_state(self, payload):
return self.connection._call(
self.UPDATE_STATE,
verify=False,
request_json=payload,
method=REQUEST.METHOD.PUT,
timeout=(5, 300),
)
def list(self, payload):
return self.connection._call(
self.LIST,
verify=False,
request_json=payload,
method=REQUEST.METHOD.POST,
timeout=(5, 300),
)
def create(self, payload):
return self.connection._call(
self.CREATE,
verify=False,
request_json=payload,
method=REQUEST.METHOD.POST,
timeout=(5, 300),
)
def update(self, payload, quota_uuid):
return self.connection._call(
self.UPDATE.format(quota_uuid),
verify=False,
request_json=payload,
method=REQUEST.METHOD.PUT,
timeout=(5, 300),
)
| [
"[email protected]"
] | |
2a730c0c977de022c76c2c650b814ec8d6a8a287 | bafb1c203362a9711f783115c7c573fdcd00a3d4 | /venv/Lib/site-packages/kivy/uix/splitter.py | 4a4f4150266aa94cc92fc95a9cbb437f991b1178 | [] | no_license | santokalayil/kivy_android_test_project | 0c41c40f6c8869767729cd153f4ce31ac09c0f1c | a4283ba4f4ca8961b2689ee7150297349aedb897 | refs/heads/main | 2023-04-14T08:18:40.453585 | 2021-04-27T19:15:21 | 2021-04-27T19:15:21 | 362,220,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,352 | py | '''Splitter
======
.. versionadded:: 1.5.0
.. image:: images/splitter.jpg
:align: right
The :class:`Splitter` is a widget that helps you re-size its child
widget/layout by letting you re-size it via dragging the boundary or
double tapping the boundary. This widget is similar to the
:class:`~kivy.uix.scrollview.ScrollView` in that it allows only one
child widget.
Usage::
splitter = Splitter(sizable_from = 'right')
splitter.add_widget(layout_or_widget_instance)
splitter.min_size = 100
splitter.max_size = 250
To change the size of the strip/border used for resizing::
splitter.strip_size = '10pt'
To change its appearance::
splitter.strip_cls = your_custom_class
You can also change the appearance of the `strip_cls`, which defaults to
:class:`SplitterStrip`, by overriding the `kv` rule in your app:
.. code-block:: kv
<SplitterStrip>:
horizontal: True if self.parent and self.parent.sizable_from[0] \
in ('t', 'b') else False
background_normal: 'path to normal horizontal image' \
if self.horizontal else 'path to vertical normal image'
background_down: 'path to pressed horizontal image' \
if self.horizontal else 'path to vertical pressed image'
'''
__all__ = ('Splitter', )
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.button import Button
from kivy.properties import (OptionProperty, NumericProperty, ObjectProperty,
ListProperty, BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
class SplitterStrip(Button):
'''Class used for the graphical representation of a
:class:`kivy.uix.splitter.SplitterStrip`.
'''
pass
class Splitter(BoxLayout):
'''See module documentation.
:Events:
`on_press`:
Fired when the splitter is pressed.
`on_release`:
Fired when the splitter is released.
.. versionchanged:: 1.6.0
Added `on_press` and `on_release` events.
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for the
:class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction.
This must be a list of four values: (bottom, right, top, left).
Read the BorderImage instructions for more information about how
to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (4, 4, 4, 4).
'''
strip_cls = ObjectProperty(SplitterStrip)
'''Specifies the class of the resize Strip.
:attr:`strip_cls` is an :class:`kivy.properties.ObjectProperty` and
defaults to :class:`~kivy.uix.splitter.SplitterStrip`, which is of type
:class:`~kivy.uix.button.Button`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
sizable_from = OptionProperty('left', options=(
'left', 'right', 'top', 'bottom'))
'''Specifies whether the widget is resizable. Options are:
`left`, `right`, `top` or `bottom`
:attr:`sizable_from` is an :class:`~kivy.properties.OptionProperty`
and defaults to `left`.
'''
strip_size = NumericProperty('10pt')
'''Specifies the size of the resize strip.
:attr:`strip_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `10pt`.
'''
min_size = NumericProperty('100pt')
'''Specifies the minimum size beyond which the widget is not resizable.
:attr:`min_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `100pt`.
'''
max_size = NumericProperty('500pt')
'''Specifies the maximum size beyond which the widget is not resizable.
:attr:`max_size` is a :class:`~kivy.properties.NumericProperty`
and defaults to `500pt`.
'''
_parent_proportion = NumericProperty(0.)
'''(internal) Specifies the distance that the slider has travelled
across its parent, used to automatically maintain a sensible
position if the parent is resized.
:attr:`_parent_proportion` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
.. versionadded:: 1.9.0
'''
_bound_parent = ObjectProperty(None, allownone=True)
'''(internal) References the widget whose size is currently being
tracked by :attr:`_parent_proportion`.
:attr:`_bound_parent` is a
:class:`~kivy.properties.ObjectProperty` and defaults to None.
.. versionadded:: 1.9.0
'''
keep_within_parent = BooleanProperty(False)
'''If True, will limit the splitter to stay within its parent widget.
:attr:`keep_within_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
rescale_with_parent = BooleanProperty(False)
'''If True, will automatically change size to take up the same
proportion of the parent widget when it is resized, while
staying within :attr:`min_size` and :attr:`max_size`. As long as
these attributes can be satisfied, this stops the
:class:`Splitter` from exceeding the parent size during rescaling.
:attr:`rescale_with_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
__events__ = ('on_press', 'on_release')
def __init__(self, **kwargs):
self._container = None
self._strip = None
super(Splitter, self).__init__(**kwargs)
do_size = self._do_size
fbind = self.fbind
fbind('max_size', do_size)
fbind('min_size', do_size)
fbind('parent', self._rebind_parent)
def on_sizable_from(self, instance, sizable_from):
if not instance._container:
return
sup = super(Splitter, instance)
_strp = instance._strip
if _strp:
# remove any previous binds
_strp.unbind(on_touch_down=instance.strip_down)
_strp.unbind(on_touch_move=instance.strip_move)
_strp.unbind(on_touch_up=instance.strip_up)
self.unbind(disabled=_strp.setter('disabled'))
sup.remove_widget(instance._strip)
else:
cls = instance.strip_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
instance._strip = _strp = cls()
sz_frm = instance.sizable_from[0]
if sz_frm in ('l', 'r'):
_strp.size_hint = None, 1
_strp.width = instance.strip_size
instance.orientation = 'horizontal'
instance.unbind(strip_size=_strp.setter('width'))
instance.bind(strip_size=_strp.setter('width'))
else:
_strp.size_hint = 1, None
_strp.height = instance.strip_size
instance.orientation = 'vertical'
instance.unbind(strip_size=_strp.setter('height'))
instance.bind(strip_size=_strp.setter('height'))
index = 1
if sz_frm in ('r', 'b'):
index = 0
sup.add_widget(_strp, index)
_strp.bind(on_touch_down=instance.strip_down)
_strp.bind(on_touch_move=instance.strip_move)
_strp.bind(on_touch_up=instance.strip_up)
_strp.disabled = self.disabled
self.bind(disabled=_strp.setter('disabled'))
def add_widget(self, widget, index=0):
if self._container or not widget:
return Exception('Splitter accepts only one Child')
self._container = widget
sz_frm = self.sizable_from[0]
if sz_frm in ('l', 'r'):
widget.size_hint_x = 1
else:
widget.size_hint_y = 1
index = 0
if sz_frm in ('r', 'b'):
index = 1
super(Splitter, self).add_widget(widget, index)
self.on_sizable_from(self, self.sizable_from)
def remove_widget(self, widget, *largs):
super(Splitter, self).remove_widget(widget)
if widget == self._container:
self._container = None
def clear_widgets(self):
self.remove_widget(self._container)
def strip_down(self, instance, touch):
if not instance.collide_point(*touch.pos):
return False
touch.grab(self)
self.dispatch('on_press')
def on_press(self):
pass
def _rebind_parent(self, instance, new_parent):
if self._bound_parent is not None:
self._bound_parent.unbind(size=self.rescale_parent_proportion)
if self.parent is not None:
new_parent.bind(size=self.rescale_parent_proportion)
self._bound_parent = new_parent
self.rescale_parent_proportion()
def rescale_parent_proportion(self, *args):
if not self.parent:
return
if self.rescale_with_parent:
parent_proportion = self._parent_proportion
if self.sizable_from in ('top', 'bottom'):
new_height = parent_proportion * self.parent.height
self.height = max(self.min_size,
min(new_height, self.max_size))
else:
new_width = parent_proportion * self.parent.width
self.width = max(self.min_size, min(new_width, self.max_size))
def _do_size(self, instance, value):
if self.sizable_from[0] in ('l', 'r'):
self.width = max(self.min_size, min(self.width, self.max_size))
else:
self.height = max(self.min_size, min(self.height, self.max_size))
@staticmethod
def _is_moving(sz_frm, diff, pos, minpos, maxpos):
if sz_frm in ('l', 'b'):
cmp = minpos
else:
cmp = maxpos
if diff == 0:
return False
elif diff > 0 and pos <= cmp:
return False
elif diff < 0 and pos >= cmp:
return False
return True
def strip_move(self, instance, touch):
if touch.grab_current is not instance:
return False
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
if sz_frm in ('t', 'b'):
diff_y = (touch.dy)
self_y = self.y
self_top = self.top
if not self._is_moving(sz_frm, diff_y, touch.y, self_y, self_top):
return
if self.keep_within_parent:
if sz_frm == 't' and (self_top + diff_y) > self.parent.top:
diff_y = self.parent.top - self_top
elif sz_frm == 'b' and (self_y + diff_y) < self.parent.y:
diff_y = self.parent.y - self_y
if sz_frm == 'b':
diff_y *= -1
if self.size_hint_y:
self.size_hint_y = None
if self.height > 0:
self.height += diff_y
else:
self.height = 1
height = self.height
self.height = max(min_size, min(height, max_size))
self._parent_proportion = self.height / self.parent.height
else:
diff_x = (touch.dx)
self_x = self.x
self_right = self.right
if not self._is_moving(sz_frm, diff_x, touch.x, self_x, self_right):
return
if self.keep_within_parent:
if sz_frm == 'l' and (self_x + diff_x) < self.parent.x:
diff_x = self.parent.x - self_x
elif (sz_frm == 'r' and
(self_right + diff_x) > self.parent.right):
diff_x = self.parent.right - self_right
if sz_frm == 'l':
diff_x *= -1
if self.size_hint_x:
self.size_hint_x = None
if self.width > 0:
self.width += diff_x
else:
self.width = 1
width = self.width
self.width = max(min_size, min(width, max_size))
self._parent_proportion = self.width / self.parent.width
def strip_up(self, instance, touch):
if touch.grab_current is not instance:
return
if touch.is_double_tap:
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
s = self.size
if sz_frm in ('t', 'b'):
if self.size_hint_y:
self.size_hint_y = None
if s[1] - min_size <= max_size - s[1]:
self.height = max_size
else:
self.height = min_size
else:
if self.size_hint_x:
self.size_hint_x = None
if s[0] - min_size <= max_size - s[0]:
self.width = max_size
else:
self.width = min_size
touch.ungrab(instance)
self.dispatch('on_release')
def on_release(self):
pass
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
class SplitterApp(App):
def build(self):
root = FloatLayout()
bx = BoxLayout()
bx.add_widget(Button())
bx.add_widget(Button())
bx2 = BoxLayout()
bx2.add_widget(Button())
bx2.add_widget(Button())
bx2.add_widget(Button())
spl = Splitter(
size_hint=(1, .25),
pos_hint={'top': 1},
sizable_from='bottom')
spl1 = Splitter(
sizable_from='left',
size_hint=(None, 1), width=90)
spl1.add_widget(Button())
bx.add_widget(spl1)
spl.add_widget(bx)
spl2 = Splitter(size_hint=(.25, 1))
spl2.add_widget(bx2)
spl2.sizable_from = 'right'
root.add_widget(spl)
root.add_widget(spl2)
return root
SplitterApp().run()
| [
"[email protected]"
] | |
9362bfbe15c850ef4711769abd97cb5b1a358e37 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderBeginnerContest/1XX/107/D_pypy.py | 0cdd99bfb61357c3953e409610ae3e254b7b3a46 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 2,636 | py | # 解説を参考に作成
# https://qiita.com/DaikiSuyama/items/7295f5160a51684554a7
# https://algo-logic.info/binary-indexed-tree/
# import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
class BinaryIndexedTree:
"""
Example: for the list l = [1, 2, 3, 4, 5, 6, 7, 8],
the tree stores the (sum) results over ranges like these:
1: [1, 2, 3, 4, 5, 6, 7, 8]
2: [1, 2, 3, 4]
3: [1, 2] [5, 6]
4: [1] [3] [5] [7]
The prefix result S(r) over 1..r is computed in log(N) by reusing the
precomputed results needed at each level.
The result over l..r is S(r) - S(l - 1), also in log(N).
Building the data structure is N*log(N).
Array data is treated as 1-indexed.
"""
def __init__(self, n):
"""
:param n: num of data.
"""
self.num = n
self.tree = [0] * (n + 1)
def add(self, k, x):
"""
:param k: [1, self.num]
:param x: add num.
:return: None
"""
while k <= self.num:
self.tree[k] += x
k += k & -k
def sum(self, k):
"""
sum over 1 ~ k
:param k:
:return:
"""
re = 0
while k > 0:
re += self.tree[k]
k -= k & -k
return re
def sum_lr(self, l, r):
"""
sum from l to r
:param l: 1 <= l <= r
:param r: l <= r <= self.num
:return:
"""
return self.sum(r) - self.sum(l - 1)
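# Usage sketch (illustrative, not part of the original file):
# bit = BinaryIndexedTree(8)
# for i, v in enumerate([1, 2, 3, 4, 5, 6, 7, 8], start=1):
#     bit.add(i, v)
# bit.sum(4)       # -> 10 (1+2+3+4)
# bit.sum_lr(3, 6) # -> 18 (3+4+5+6)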
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N, A):
import math
sorted_A = sorted(A)
ok = 0
ng = len(sorted_A)
middle = math.ceil((N * (N + 1)) // 2 / 2)
while abs(ok - ng) > 1:
mid = (ok + ng) // 2
tmp_A = [1 if a >= sorted_A[mid] else -1 for a in A]
sum_A = [0]
for a in tmp_A:
sum_A.append(sum_A[-1] + a)
tmp_m = min(sum_A)
up_A = [a - tmp_m + 1 for a in sum_A]
bit = BinaryIndexedTree(max(up_A))
over = 0
bit.add(up_A[0], 1)
for a in up_A[1:]:
over += bit.sum(a)
bit.add(a, 1)
if over >= middle:
ok = mid
else:
ng = mid
print(sorted_A[ok])
if __name__ == '__main__':
N = int(input())
A = [int(i) for i in input().split()]
solve(N, A)
# # test
# from random import randint
# from func import random_str
# N = 10 ** 5
# A = [randint(1, 10 ** 9) for _ in range(N)]
# solve(N, A)
| [
"[email protected]"
] | |
e1e69599554fc2a6265bebc374c81fc5ce30e2c7 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20171208/tinyparse/src/00.py | 67a7bccef96e3d12ce568646dbc86e3a7ecbac8c | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 67 | py | def f(x):
return g(x) # error
def g(x, y):
return g + y
| [
"[email protected]"
] | |
28c91f379d6f7e52de4f2d24f3ab989eb31b410d | c71b3aa6091d3cc0469198e64cd394fa9dae1817 | /setup.py | f108ef64003f3065d39aab473ffa5fb5a93686e0 | [
"MIT"
] | permissive | offermann/elizabeth | 8a30f65f93aee244437de4cd42084cb29607c724 | 3b89512e566895846136809e571abf50b93c5312 | refs/heads/master | 2020-01-23T21:40:46.324687 | 2016-11-23T07:21:03 | 2016-11-23T07:21:03 | 74,689,354 | 0 | 1 | null | 2016-11-24T16:27:52 | 2016-11-24T16:27:52 | null | UTF-8 | Python | false | false | 1,385 | py | from distutils.core import setup
import elizabeth
setup(
name='elizabeth',
version=elizabeth.__version__,
packages=['elizabeth'],
keywords=['fake', 'data', 'testing',
'generate', 'faker', 'elizabeth',
'bootstrap', 'db', 'generic',
'church', 'dummy'
],
package_data={
'elizabeth': [
'data/*/*',
]
},
url='https://github.com/lk-geimfari/elizabeth',
license='MIT',
author=elizabeth.__author__,
author_email='[email protected]',
description='Elizabeth is a library that help you generate fake data.',
long_description="Elizabeth (ex Church) is a library to generate fake data."
"It's very useful when you need to bootstrap "
"your database.",
classifiers=[
"Development Status :: 4 - Beta",
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development',
'Topic :: Software Development :: Testing',
],
)
| [
"[email protected]"
] | |
2db441df96d69d4eda30cd22d74ee1b62a47205e | 9680c27718346be69cf7695dba674e7a0ec662ca | /Pattern-Python/A shape.py | ad545c440e124919825e93bc861239a679658909 | [] | no_license | Md-Monirul-Islam/Python-code | 5a2cdbe7cd3dae94aa63298b5b0ef7e0e31cd298 | df98f37dd9d21784a65c8bb0e46d47a646259110 | refs/heads/main | 2023-01-19T05:15:04.963904 | 2020-11-19T06:10:09 | 2020-11-19T06:10:09 | 314,145,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | for row in range(7):
for col in range(5):
if ((col==0 or col==4) and row!=0) or ((row==0 or row==3) and (col>0 and col<4)):
print('*',end='')
else:
print(end=' ')
print() | [
"[email protected]"
] | |
687e72bc28d39ba17e95104a1d67e294b817f16e | 92993cff825da80a8ff601572a0c52b0b7d3cbde | /algorithms/Svm/ADMM/L1/ADMM_L1_m162.py | 47a9f8d3349521c04157398120ef42791fb5ac66 | [] | no_license | yingzhuoy/MRs-of-linear-models | 06e8b1f84b08c6aa77553813824cf35c1806c5a7 | c3df8299e039a12613f2022b370b8c3e9c2dd822 | refs/heads/master | 2023-04-07T23:09:37.736952 | 2021-04-04T05:33:37 | 2021-04-04T05:33:37 | 265,124,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,222 | py | import numpy as np
from numpy import linalg
#import cvxopt
#from cvxopt import matrix,solvers
#import scipy.sparse.linalg
from algorithms.clf import Clf
"""
Preconditioned Conjugate Gradient Method
"""
def precond(M, r):
q = M * r
return q
def inner_prod(A, B):
A = np.matrix(A)
B = np.matrix(B)
return np.dot(A.reshape(-1,1).T, B.reshape(-1,1))
def cg(A, b, x=None, tol=1.0e-6, max_iter=128):
# precondition
A = np.matrix(A)
b = np.matrix(b)
normb = np.linalg.norm(b, 'fro')
m = b.shape[0]
M = np.eye(m)
x = np.zeros((m, m))
Aq = (A*x)
r = b - Aq # m x m
q = precond(M, r) # m x m
tau_old = np.linalg.norm(q, 'fro')
rho_old = inner_prod(r, q)
theta_old = 0
Ad = np.zeros((m, m))
d = np.zeros((m, m))
res = r.reshape(m, m)
tiny = 1e-30
for i in range(max_iter):
Aq = A * q
sigma = inner_prod(q, Aq)
if abs(sigma.item()) < tiny:
break
else:
alpha = rho_old / sigma;
alpha = alpha.item()
r = r - alpha * Aq
r = r.reshape(m, m)
u = precond(M, r)
theta = np.linalg.norm(u,'fro')/tau_old
c = 1 / np.sqrt(1+theta*theta)
tau = tau_old * theta * c
gam = c*c*theta_old*theta_old
eta = c*c*alpha
d = gam * d + eta * q
x = x + d
# stop
Ad = gam*Ad+eta*Aq
res = res - Ad
if np.linalg.norm(res, 'fro') < tol*normb:
break
else:
rho = inner_prod(r, u)
beta = rho / rho_old
beta = beta.item()
q = u + beta * q
rho_old = rho
tau_old = tau
theta_old = theta
return x
def admm(X, y, max_iter=3000):
# solve the dual problem by ADMM
m, n = X.shape
X = np.column_stack((X, np.ones((m, 1))))
y = y.astype(np.float64)
data_num = len(y)
C = 1.0
kernel = np.dot(X, np.transpose(X))
p = np.matrix(np.multiply(kernel,np.outer(y, y)))
e = np.matrix(np.ones([data_num, 1], np.float64))
bounds = (0, C)
low, up = bounds
x = np.ones((m,1))
tau = 1.618
sigma = 1
# initial
u = np.ones((m, 1))
t = x
A = p + sigma * np.eye(m)
I = np.eye(m)
invA = cg(A, I)
for it in range(max_iter):
# update x
b = e + u + sigma * t
x = invA * b
# update y
t = x - (1/sigma)*u
t[t < low] = low
t[t > up] = up
# update u
u = u - tau*sigma*(x-t)
dual = -(0.5*x.T*(p*x) - e.T*x)
dual = dual.item()
#----bug----
#y1 = np.reshape(y, (-1, 1))
y1 = np.reshape(y+0.06721138118143437, (-1, 1))
lambda1 = np.multiply(x, y1)
w = np.dot(X.T, lambda1)
w = np.matrix(w).reshape(-1, 1)
tmp = np.maximum(1-np.multiply(y1, X*w),0)
primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
primal = primal.item()
# stop criteria
if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
break
# print(t, np.linalg.norm(gradient))
# print(np.min(x), np.max(x))
# print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
# print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))
#----bug----
#y1 = np.reshape(y, (-1, 1))
y1 = np.reshape(y+0.06721138118143437, (-1, 1))
alpha1 = x
lambda1 = np.multiply(y1,alpha1)
w = np.dot(X.T, lambda1)
w = np.array(w).reshape(-1)
b = w[n]
w = w[0:n]
return w, b
#L1-svm
class ADMM_L1_m162():
def fit(self, X, y):
y[y == 0] = -1
# add logitR to verify the correctness
#from sklearn.svm import LinearSVC
#SVM = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000, verbose=1).fit(X, np.array(y).ravel())
#w1 = SVM.coef_; b1 = SVM.intercept_
#w1 = w1.reshape(-1); b1 = b1[0]
#import time
#t1 = time.time()
w, b = admm(X, y)
#t2 = time.time()
#print('time:', t2-t1)
#print('diff', np.linalg.norm(w1-w), b, b1)
clf = Clf(w, b)
return clf | [
"[email protected]"
] | |
5cb4bea24ec8cb56b25d43314f597cbdc352b4cc | e749e94163a0e20c551875583baef4e02e72de5e | /Github/IPS-10/test_script.py | e65add9190f155013f39296105ebadd5a65a640f | [] | no_license | tritims/TensorFlow-Program-Bugs | 3445200179f4b7f5cc4ac1c6f076468ec19e51bb | 158ba0a23e0cb74e73dbab08571b05fc36848f2a | refs/heads/master | 2022-07-08T16:33:38.511696 | 2020-05-20T14:20:47 | 2020-05-20T14:20:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import tensorflow as tf
import sys
import os
import subprocess
try:
assert len(sys.argv) == 2
version = ["-buggy", "-fix"][int(sys.argv[1])]
except:
print(
"Please run 'python test_script 0' for testing the buggy-version and "
"'python test_script 1' for testing the fix-version.\nAborted...")
exit(1)
interpreter_path = sys.executable
print("Running at: ", interpreter_path)
assert tf.__version__ == "1.8.0"
def get_target_dir():
for x in os.listdir(os.path.dirname(os.path.abspath(__file__))):
if version in x:
return x
raise ValueError("No dir ends with %s!" % version)
subprocess.call(
[interpreter_path, "./%s/src/train_softmax.py" % get_target_dir(), "--data_dir", "./data/test", "--max_nrof_epochs",
"5", "--epoch_size", "50"])
| [
"[email protected]"
] | |
09900557e13191fe7b23e98ddf0f10fc11d17428 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayFundJointaccountAccountModifyResponse.py | 58d20e1c923c22e32b35d37c8b7fca6babba5937 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 470 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundJointaccountAccountModifyResponse(AlipayResponse):
def __init__(self):
super(AlipayFundJointaccountAccountModifyResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayFundJointaccountAccountModifyResponse, self).parse_response_content(response_content)
| [
"[email protected]"
] | |
1c557226222f8ae8fd3b3380ddf57faa8460ae07 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/documentdb/v20210401preview/sql_resource_sql_role_definition.py | cb30538f8830dbc7262f71c0d975a9f11388df98 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 15,536 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SqlResourceSqlRoleDefinitionArgs', 'SqlResourceSqlRoleDefinition']
@pulumi.input_type
class SqlResourceSqlRoleDefinitionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None):
"""
The set of arguments for constructing a SqlResourceSqlRoleDefinition resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
:param pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]] permissions: The set of operations allowed through this Role Definition.
:param pulumi.Input[str] role_definition_id: The GUID for the Role Definition.
:param pulumi.Input[str] role_name: A user-friendly name for the Role Definition. Must be unique for the database account.
:param pulumi.Input['RoleDefinitionType'] type: Indicates whether the Role Definition was built-in or user created.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if assignable_scopes is not None:
pulumi.set(__self__, "assignable_scopes", assignable_scopes)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if role_definition_id is not None:
pulumi.set(__self__, "role_definition_id", role_definition_id)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="assignableScopes")
def assignable_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
"""
return pulumi.get(self, "assignable_scopes")
@assignable_scopes.setter
def assignable_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "assignable_scopes", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]]:
"""
The set of operations allowed through this Role Definition.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionArgs']]]]):
pulumi.set(self, "permissions", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> Optional[pulumi.Input[str]]:
"""
The GUID for the Role Definition.
"""
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_definition_id", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[pulumi.Input[str]]:
"""
A user-friendly name for the Role Definition. Must be unique for the database account.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['RoleDefinitionType']]:
"""
Indicates whether the Role Definition was built-in or user created.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['RoleDefinitionType']]):
pulumi.set(self, "type", value)
class SqlResourceSqlRoleDefinition(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None,
__props__=None):
"""
An Azure Cosmos DB SQL Role Definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]] permissions: The set of operations allowed through this Role Definition.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] role_definition_id: The GUID for the Role Definition.
:param pulumi.Input[str] role_name: A user-friendly name for the Role Definition. Must be unique for the database account.
:param pulumi.Input['RoleDefinitionType'] type: Indicates whether the Role Definition was built-in or user created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlResourceSqlRoleDefinitionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure Cosmos DB SQL Role Definition.
:param str resource_name: The name of the resource.
:param SqlResourceSqlRoleDefinitionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlRoleDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
assignable_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role_definition_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['RoleDefinitionType']] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlResourceSqlRoleDefinitionArgs.__new__(SqlResourceSqlRoleDefinitionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["assignable_scopes"] = assignable_scopes
__props__.__dict__["permissions"] = permissions
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["role_definition_id"] = role_definition_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["type"] = type
__props__.__dict__["name"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210515:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-native:documentdb/v20210615:SqlResourceSqlRoleDefinition"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:SqlResourceSqlRoleDefinition")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlResourceSqlRoleDefinition, __self__).__init__(
'azure-native:documentdb/v20210401preview:SqlResourceSqlRoleDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlRoleDefinition':
"""
Get an existing SqlResourceSqlRoleDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
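
        A minimal lookup sketch; the ARM resource ID below is an illustrative
        placeholder, not a real ID:

        ```python
        existing = SqlResourceSqlRoleDefinition.get(
            "existingSqlRoleDefinition",
            id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.DocumentDB/databaseAccounts/<account>/sqlRoleDefinitions/<guid>")
        ```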
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SqlResourceSqlRoleDefinitionArgs.__new__(SqlResourceSqlRoleDefinitionArgs)
__props__.__dict__["assignable_scopes"] = None
__props__.__dict__["name"] = None
__props__.__dict__["permissions"] = None
__props__.__dict__["role_name"] = None
__props__.__dict__["type"] = None
return SqlResourceSqlRoleDefinition(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="assignableScopes")
def assignable_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of fully qualified Scopes at or below which Role Assignments may be created using this Role Definition. This will allow application of this Role Definition on the entire database account or any underlying Database / Collection. Must have at least one element. Scopes higher than Database account are not enforceable as assignable Scopes. Note that resources referenced in assignable Scopes need not exist.
"""
return pulumi.get(self, "assignable_scopes")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def permissions(self) -> pulumi.Output[Optional[Sequence['outputs.PermissionResponse']]]:
"""
The set of operations allowed through this Role Definition.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter(name="roleName")
def role_name(self) -> pulumi.Output[Optional[str]]:
"""
A user-friendly name for the Role Definition. Must be unique for the database account.
"""
return pulumi.get(self, "role_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
183e2af0650d8c37d8a20a17832530637e0782da | d18c9e4bccc85d3c5770515966ce9866f8bc39dc | /tests/test_stream_xep_0077.py | c47c4de5941515e66302ac4bad41a3d39db6f0b6 | [
"MIT",
"BSD-3-Clause"
] | permissive | dhsc19/slixmpp | fa1192e05477ebff1ab1fcf92389f75c6a66e30f | 2ba89727a6627f86e66ec4f3baba464da1b0b19c | refs/heads/master | 2023-03-07T14:27:33.863558 | 2021-02-19T18:06:41 | 2021-02-19T18:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,044 | py | """
This only covers the component registration side of the XEP-0077 plugin
"""
import unittest
from slixmpp import ComponentXMPP, Iq
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.test import SlixTest
from slixmpp.plugins.xep_0077 import Register
class TestRegistration(SlixTest):
def setUp(self):
self.stream_start(
mode="component", plugins=["xep_0077"], jid="shakespeare.lit", server="lit"
)
def testRegistrationForm(self):
self.stream_start(
mode="component", plugins=["xep_0077"], jid="shakespeare.lit", server="lit"
)
self.recv(
"""
<iq type='get' id='reg1' to='shakespeare.lit' from='bill@server/resource'>
<query xmlns='jabber:iq:register'/>
</iq>
""",
)
self.send(
f"""
<iq type='result' id='reg1' from='shakespeare.lit' to='bill@server/resource'>
<query xmlns='jabber:iq:register'>
<instructions>{self.xmpp["xep_0077"].form_instructions}</instructions>
<username/>
<password/>
</query>
</iq>
""",
use_values=False # Fails inconsistently without this
)
def testRegistrationSuccessAndModif(self):
self.recv(
"""
<iq type='set' id='reg2' to='shakespeare.lit' from="bill@server/resource">
<query xmlns='jabber:iq:register'>
<username>bill</username>
<password>Calliope</password>
</query>
</iq>
"""
)
self.send("<iq type='result' id='reg2' from='shakespeare.lit' to='bill@server/resource'/>")
user_store = self.xmpp["xep_0077"]._user_store
self.assertEqual(user_store["bill@server"]["username"], "bill")
self.assertEqual(user_store["bill@server"]["password"], "Calliope")
self.recv(
"""
<iq type='get' id='reg1' to='shakespeare.lit' from="bill@server/resource">
<query xmlns='jabber:iq:register'/>
</iq>
""",
)
self.send(
f"""
<iq type='result' id='reg1' to="bill@server/resource" from='shakespeare.lit'>
<query xmlns='jabber:iq:register'>
<instructions>{self.xmpp["xep_0077"].form_instructions}</instructions>
<username>bill</username>
<password>Calliope</password>
<registered />
</query>
</iq>
""",
use_values=False # Fails inconsistently without this
)
def testRegistrationAndRemove(self):
self.recv(
"""
<iq type='set' id='reg2' to='shakespeare.lit' from="[email protected]/globe">
<query xmlns='jabber:iq:register'>
<username>bill</username>
<password>Calliope</password>
</query>
</iq>
"""
)
self.send("<iq type='result' id='reg2' from='shakespeare.lit' to='[email protected]/globe'/>")
pseudo_iq = self.xmpp.Iq()
pseudo_iq["from"] = "[email protected]/globe"
user = self.xmpp["xep_0077"].api["user_get"](None, None, None, pseudo_iq)
self.assertEqual(user["username"], "bill")
self.assertEqual(user["password"], "Calliope")
self.recv(
"""
<iq type='set' from='[email protected]/globe' id='unreg1'>
<query xmlns='jabber:iq:register'>
<remove/>
</query>
</iq>
"""
)
self.send("<iq type='result' to='[email protected]/globe' id='unreg1'/>")
user_store = self.xmpp["xep_0077"]._user_store
self.assertIs(user_store.get("[email protected]"), None)
suite = unittest.TestLoader().loadTestsFromTestCase(TestRegistration)
| [
"[email protected]"
] | |
c75f84f20e771674176cbd904fd7adf259635e28 | 758ac341257ea099e678fd08830a7b95f5d85e59 | /tc_gan/networks/tests/test_euler_ssn.py | 0faf3937702d1e05a5834e3d49e983876d109aa7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ahmadianlab/tc-gan | 98d17e087e89d70bc571cb35e7e7b035e87ca0f2 | 06c549e8ae74bc6af62fddeed698565ea1f548c5 | refs/heads/master | 2020-04-12T16:52:15.051511 | 2018-12-21T01:06:53 | 2018-12-21T01:06:53 | 162,626,424 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import copy
import numpy
import pytest
from ... import ssnode
from ...utils import report_allclose_tols
from ..fixed_time_sampler import new_JDS
from ..ssn import BandwidthContrastStimulator, EulerSSNModel
from ..wgan import DEFAULT_PARAMS, grid_stimulator_inputs
def make_ssn(model_config):
kwds = dict(model_config)
kwds.pop('bandwidths', None)
kwds.pop('contrasts', None)
kwds.pop('sample_sites', None)
kwds.pop('batchsize', None)
kwds.pop('gen', None)
kwds.pop('disc', None)
stimulator, kwds = BandwidthContrastStimulator.consume_kwargs(**kwds)
model, kwds = EulerSSNModel.consume_kwargs(stimulator, **kwds)
assert not kwds
return model
JDS = copy.deepcopy(new_JDS)
@pytest.mark.parametrize('num_sites, batchsize', [
(10, 1),
(10, 2),
# (10, 100), # worked, but slow (~18 sec)
# (201, 100), # worked, but very slow
])
def test_compare_with_ssnode(num_sites, batchsize,
seqlen=4000, rtol=5e-4, atol=5e-4):
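    # A long Euler integration of the SSN should settle onto the same fixed
    # points that ssnode solves for directly; compare its final time average.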
seed = num_sites * batchsize # let's co-vary seed as well
bandwidths = DEFAULT_PARAMS['bandwidths']
contrasts = DEFAULT_PARAMS['contrasts']
stimulator_contrasts, stimulator_bandwidths \
= grid_stimulator_inputs(contrasts, bandwidths, batchsize)
num_tcdom = stimulator_bandwidths.shape[-1]
skip_steps = seqlen - 1
model = make_ssn(dict(
DEFAULT_PARAMS,
num_sites=num_sites,
num_tcdom=num_tcdom,
seqlen=seqlen,
skip_steps=skip_steps,
**JDS
))
# ssnode_fps.shape: (batchsize, num_tcdom, 2N)
zs, ssnode_fps, info = ssnode.sample_fixed_points(
batchsize,
N=num_sites,
bandwidths=bandwidths,
contrast=contrasts,
seed=seed,
io_type=model.io_type,
atol=1e-10,
**JDS
)
# time_avg.shape: (batchsize, num_tcdom, 2N)
time_avg = model.compute_time_avg(
zs=zs,
stimulator_bandwidths=stimulator_bandwidths,
stimulator_contrasts=stimulator_contrasts)
report_allclose_tols(time_avg, ssnode_fps,
rtols=[1e-2, 1e-3, 5e-4, 1e-4],
atols=[1e-2, 1e-3, 5e-4, 1e-4])
numpy.testing.assert_allclose(time_avg, ssnode_fps, rtol=rtol, atol=atol)
@pytest.mark.parametrize('num_sites, batchsize', [
(201, 1),
# (201, 100), # worked, but very slow
(10, 100),
])
def test_compare_with_ssnode_slowtest(num_sites, batchsize):
test_compare_with_ssnode(num_sites, batchsize,
seqlen=10000, rtol=1e-4)
| [
"[email protected]"
] | |
6fd7ab16aec3e11a12f934268050bab75d09ea8b | 562d4bf000dbb66cd7109844c972bfc00ea7224c | /addons/efact/models/account/account_move.py | f03773b2e4e3da25665a8331f5175c34c931716c | [] | no_license | Mohamed33/odoo-efact-11-pos | e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059 | de38355aea74cdc643a347f7d52e1d287c208ff8 | refs/heads/master | 2023-03-10T15:24:44.052883 | 2021-03-06T13:25:58 | 2021-03-06T13:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | from odoo import api,fields,models,_
from odoo.exceptions import UserError, ValidationError
class AccountMove(models.Model):
_inherit = ['account.move']
@api.multi
def post(self):
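        # Moves still named '/' are numbered here: prefer the invoice's
        # previous move_name, otherwise draw from the journal's regular or
        # refund sequence, then mark the move as posted.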
invoice = self._context.get('invoice', False)
self._post_validate()
for move in self:
move.line_ids.create_analytic_lines()
if move.name == '/':
new_name = False
journal = move.journal_id
if invoice and invoice.move_name and invoice.move_name != '/':
new_name = invoice.move_name
else:
if journal.sequence_id:
# If invoice is actually refund and journal has a refund_sequence then use that one or use the regular one
sequence = journal.sequence_id
if invoice and invoice.type in ['out_refund', 'in_refund'] and journal.refund_sequence:
if not journal.refund_sequence_id:
raise UserError(_('Please define a sequence for the credit notes'))
sequence = journal.refund_sequence_id
new_name = sequence.with_context(ir_sequence_date=move.date).next_by_id()
else:
raise UserError(_('Please define a sequence on the journal.'))
if new_name:
move.name = new_name
return self.write({'state': 'posted'}) | [
"[email protected]"
] | |
5097e022046f18b13e5260b89c8f292eb264a408 | 967056372d123ad5a86705156aea928d7352fe6a | /python实战/pytesttraining/src/ut/test_module.py | a20576ceafeee7df7faaa7acddff08cce684ca89 | [] | no_license | lxy39678/Python | ea179ef929eb9ddddb2460656aad07880ae67f84 | aba0434bc8ca7a2abdaa3ced3c4d84a8de819c61 | refs/heads/master | 2020-04-18T22:05:11.683134 | 2019-01-27T07:49:09 | 2019-01-27T07:49:09 | 167,783,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | #!/usr/bin/env python
# encoding: utf-8
#author: Jim Yin
import unittest
import logging
def setUpModule():
print("setup method\n")
global foo
foo = list(range(10))
class simple_test(unittest.TestCase):
def test_1st(self):
logging.info('info')
logging.error('error')
print('simple_test1_1'+str(foo))
self.assertEqual(foo.pop(), 9)
def test_2nd(self):
print('simple_test1_2' + str(foo))
self.assertEqual(foo.pop(), 8)
class simple_test2(unittest.TestCase):
def test_1st(self):
        # foo == [0, ..., 7] at this point (9 and 8 were popped by simple_test)
print('simple_test2_1' + str(foo))
self.assertEqual(foo.pop(), 7)
def test_2nd(self):
print('simple_test2_2' + str(foo))
self.assertNotEqual(1.1,1.0)
def tearDownModule():
print("end method")
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
e2997c7f2c9473f23e58fc3b9d8e197a81943c02 | 16e842bcb73638586d93c17e9838fb89bc2b60bc | /Module-13/13.1 Django Framework/django-model-forms/Django-Demos-feature-django-model-forms/model_form_project/model_app/models.py | 7aed7bad4cb083d98332cb7a9d1e9a1f370c6161 | [] | no_license | krupa-thakkar/Python | dcbda2fe8296ffdd25641cf1e039132f41e0e44e | cbf0ec0399084a9a0a4ba5cebe739580bff4ce06 | refs/heads/master | 2023-03-24T05:43:58.799704 | 2021-01-25T06:20:44 | 2021-01-25T06:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from django.db import models
class UserDetails(models.Model):
Firstname = models.CharField(max_length=100)
Lastname = models.CharField(max_length=255)
Mobile = models.CharField(max_length=11,default='123')
def __str__(self):
        return self.Firstname  # the model defines no 'title' field
| [
"[email protected]"
] | |
e692d7540ebbcd152e9cb9b958002b5c933ad7ec | eef72818143c9ffef4c759a1331e8227c14be792 | /sktime/forecasting/online_learning/tests/test_online_learning.py | 151298059c33206969c1a11bb443b9eb09866944 | [
"BSD-3-Clause"
] | permissive | jattenberg/sktime | 66a723d7844146ac1675d2e4e73f35a486babc65 | fbe4af4d8419a01ada1e82da1aa63c0218d13edb | refs/heads/master | 2023-08-12T07:32:22.462661 | 2022-08-16T10:19:22 | 2022-08-16T10:19:22 | 298,256,169 | 0 | 0 | BSD-3-Clause | 2020-09-24T11:20:23 | 2020-09-24T11:20:23 | null | UTF-8 | Python | false | false | 3,221 | py | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Test OnlineEnsembleForecaster."""
__author__ = ["magittan"]
import numpy as np
from sklearn.metrics import mean_squared_error
from sktime.datasets import load_airline
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.model_selection import (
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.online_learning._online_ensemble import OnlineEnsembleForecaster
from sktime.forecasting.online_learning._prediction_weighted_ensembler import (
NNLSEnsemble,
NormalHedgeEnsemble,
)
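# One-step-ahead splitter used by update_predict below: a sliding window of
# length 1 with forecasting horizon 1.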
cv = SlidingWindowSplitter(start_with_window=True, window_length=1, fh=1)
def test_weights_for_airline_averaging():
"""Test weights."""
y = load_airline()
y_train, y_test = temporal_train_test_split(y)
forecaster = OnlineEnsembleForecaster(
[
("ses", ExponentialSmoothing(seasonal="multiplicative", sp=12)),
(
"holt",
ExponentialSmoothing(
trend="add", damped_trend=False, seasonal="multiplicative", sp=12
),
),
(
"damped_trend",
ExponentialSmoothing(
trend="add", damped_trend=True, seasonal="multiplicative", sp=12
),
),
]
)
forecaster.fit(y_train)
expected = np.array([1 / 3, 1 / 3, 1 / 3])
np.testing.assert_allclose(forecaster.weights, expected, rtol=1e-8)
def test_weights_for_airline_normal_hedge():
"""Test weights."""
y = load_airline()
y_train, y_test = temporal_train_test_split(y)
hedge_expert = NormalHedgeEnsemble(n_estimators=3, loss_func=mean_squared_error)
forecaster = OnlineEnsembleForecaster(
[
("av5", NaiveForecaster(strategy="mean", window_length=5)),
("av10", NaiveForecaster(strategy="mean", window_length=10)),
("av20", NaiveForecaster(strategy="mean", window_length=20)),
],
ensemble_algorithm=hedge_expert,
)
forecaster.fit(y_train)
forecaster.update_predict(y=y_test, cv=cv, reset_forecaster=False)
expected = np.array([0.17077154, 0.48156709, 0.34766137])
np.testing.assert_allclose(forecaster.weights, expected, atol=1e-8)
def test_weights_for_airline_nnls():
"""Test weights."""
y = load_airline()
y_train, y_test = temporal_train_test_split(y)
hedge_expert = NNLSEnsemble(n_estimators=3, loss_func=mean_squared_error)
forecaster = OnlineEnsembleForecaster(
[
("av5", NaiveForecaster(strategy="mean", window_length=5)),
("av10", NaiveForecaster(strategy="mean", window_length=10)),
("av20", NaiveForecaster(strategy="mean", window_length=20)),
],
ensemble_algorithm=hedge_expert,
)
forecaster.fit(y_train)
forecaster.update_predict(y=y_test, cv=cv, reset_forecaster=False)
expected = np.array([0.04720766, 0, 1.03410876])
np.testing.assert_allclose(forecaster.weights, expected, atol=1e-8)
| [
"[email protected]"
] | |
baf86efde77e669c26a16a2636a47d246e9c6551 | 724d4b6d4d7a834f138b6fe620db30fb9e0fb686 | /design_principles/modularity_tutorial/parametrized_ml.py | ba372e0e5dd4f6b499090803355b13bba47bd53c | [] | no_license | spierre91/medium_code | 0b1d9b8c683b7432d7b1bfb980f436c1afb6e61f | 0ad618b7a557083216e77717705bc49aa847b17e | refs/heads/master | 2023-09-04T11:03:59.291613 | 2023-08-16T22:18:02 | 2023-08-16T22:18:02 | 214,266,919 | 85 | 80 | null | 2022-08-06T05:56:10 | 2019-10-10T19:20:05 | Python | UTF-8 | Python | false | false | 2,443 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 15:22:18 2023
@author: sadrach.pierre
"""
import numpy as np
import pandas as pd
import catboost as cb
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, precision_score, accuracy_score
import matplotlib.pyplot as plt
# Step 1: Read in the data
data = pd.read_csv('synthetic_transaction_data_Dining.csv')
# Step 2: Define the list of merchant states
MERCHANT_STATES = ['New York', 'Florida']
# Step 3: Iterate over merchant states, perform model training and evaluation
cats = ['cardholder_name', 'card_number', 'card_type', 'merchant_name', 'merchant_category',
'merchant_state', 'merchant_city', 'merchant_category_code']
for state in MERCHANT_STATES:
print("Evaluation for '{}' data:".format(state))
# Filter data frames for the current state
filtered_data = data[data['merchant_state'] == state]
# Split the data into training and testing sets
X = filtered_data[['cardholder_name', 'card_number', 'card_type', 'merchant_name', 'merchant_category',
'merchant_state', 'merchant_city', 'transaction_amount', 'merchant_category_code']]
y = filtered_data['fraud_flag']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Perform hyperparameter tuning with grid search and build the model
param_grid = {
'depth': [4, 6, 8],
'learning_rate': [0.01, 0.05, 0.1],
'iterations': [100, 200, 300]
}
model = cb.CatBoostClassifier(cat_features=cats, random_state=42)
grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=3)
grid_search.fit(X_train, y_train)
best_params = grid_search.best_params_
    model = cb.CatBoostClassifier(cat_features=cats, random_state=42, **best_params)  # categorical columns only, as above
model.fit(X_train, y_train)
# Evaluate the model
predictions = model.predict(X_test)
precision = precision_score(y_test, predictions)
accuracy = accuracy_score(y_test, predictions)
print('Precision:', precision)
print('Accuracy:', accuracy)
# Visualize the confusion matrix
cm = confusion_matrix(y_test, predictions)
sns.heatmap(cm, annot=True, cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
print('\n')
| [
"[email protected]"
] | |
951505c501fbaadeb5da86e0a7e8d1942d24ca8c | 0ba7a4571720e8e8af2944ed61fae7542b1fd556 | /plugins/modules/influxdb_user.py | 4080c1da7750743aeb3661988c34d375e3138a3e | [] | no_license | ansible-collection-migration/database.influxdb | 1395a40bb2f27482465f1ab190c9275d6cb1bceb | 81f060e5e02d1e0fa8a0679f45650c6f21ed4cb4 | refs/heads/master | 2020-12-18T13:02:11.364756 | 2020-02-03T21:58:43 | 2020-02-03T21:58:43 | 235,393,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,106 | py | #!/usr/bin/python
# (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
# insipred by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
user_password:
description:
- Password to be set for the user.
required: false
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ present, absent ]
default: present
grants:
description:
- Privileges to grant to this user. Takes a list of dicts containing the
"database" and "privilege" keys.
- If this argument is not provided, the current grants will be left alone.
If an empty list is provided, all grants for the user will be removed.
extends_documentation_fragment:
- database.influxdb.influxdb
'''
EXAMPLES = '''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create a user on localhost with privileges
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
grants:
- database: 'collectd'
privilege: 'WRITE'
- database: 'graphite'
privilege: 'READ'
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = '''
#only defaults
'''
from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.database.influxdb.plugins.module_utils.influxdb as influx
def find_user(module, client, user_name):
user_result = None
try:
users = client.get_list_users()
for user in users:
if user['user'] == user_name:
user_result = user
break
except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
module.fail_json(msg=to_native(e))
return user_result
def check_user_password(module, client, user_name, user_password):
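    # Probe the candidate credentials: switch to them and issue a harmless
    # query; InfluxDB answers 401 when the password does not match.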
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ConnectionError as e:
module.fail_json(msg=to_native(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def set_user_grants(module, client, user_name, grants):
changed = False
try:
current_grants = client.get_list_privileges(user_name)
        # Fix privileges wording; build a new list instead of deleting from
        # the list being iterated, which would skip the following element
        fixed_grants = []
        for v in current_grants:
            if v['privilege'] == 'ALL PRIVILEGES':
                v['privilege'] = 'ALL'
            if v['privilege'] != 'NO PRIVILEGES':
                fixed_grants.append(v)
        current_grants = fixed_grants
# check if the current grants are included in the desired ones
for current_grant in current_grants:
if current_grant not in grants:
if not module.check_mode:
client.revoke_privilege(current_grant['privilege'],
current_grant['database'],
user_name)
changed = True
# check if the desired grants are included in the current ones
for grant in grants:
if grant not in current_grants:
if not module.check_mode:
client.grant_privilege(grant['privilege'],
grant['database'],
user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
return changed
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
admin=dict(default='False', type='bool'),
grants=dict(type='list')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
grants = module.params['grants']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
changed = False
if state == 'present':
if user:
if not check_user_password(module, client, user_name, user_password) and user_password is not None:
set_user_password(module, client, user_name, user_password)
changed = True
try:
if admin and not user['admin']:
client.grant_admin_privileges(user_name)
changed = True
elif not admin and user['admin']:
client.revoke_admin_privileges(user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=to_native(e))
else:
user_password = user_password or ''
create_user(module, client, user_name, user_password, admin)
changed = True
if grants is not None:
if set_user_grants(module, client, user_name, grants):
changed = True
module.exit_json(changed=changed)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
47b2b5549a877f3b8f29674f02a58a285f2fd471 | 435b287d58ed65bfa65bf042134e1bb1bfaed0dd | /day13/intcode.py | ed741db6a87696a5e6ec3c8c8a3fafa89e171c25 | [] | no_license | marxin/AoC2019 | 2ef5b79b37351e86be3e44c9d400332f6dae3ae0 | 2a76ec78908be2f3c3c145fef6e52ade11c48a7b | refs/heads/master | 2020-12-01T20:53:38.068563 | 2019-12-25T08:38:05 | 2019-12-25T08:38:05 | 230,766,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,335 | py | #!/usr/bin/env python3
import time
from termcolor import colored
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', help="Verbose mode", action = 'store_true')
args = parser.parse_args()
init_data = [1,380,379,385,1008,2663,456801,381,1005,381,12,99,109,2664,1101,0,0,383,1101,0,0,382,20101,0,382,1,20102,1,383,2,21102,37,1,0,1105,1,578,4,382,4,383,204,1,1001,382,1,382,1007,382,44,381,1005,381,22,1001,383,1,383,1007,383,23,381,1005,381,18,1006,385,69,99,104,-1,104,0,4,386,3,384,1007,384,0,381,1005,381,94,107,0,384,381,1005,381,108,1106,0,161,107,1,392,381,1006,381,161,1101,-1,0,384,1106,0,119,1007,392,42,381,1006,381,161,1102,1,1,384,21001,392,0,1,21102,1,21,2,21102,1,0,3,21102,138,1,0,1105,1,549,1,392,384,392,21001,392,0,1,21101,0,21,2,21102,1,3,3,21101,0,161,0,1106,0,549,1101,0,0,384,20001,388,390,1,20101,0,389,2,21102,1,180,0,1105,1,578,1206,1,213,1208,1,2,381,1006,381,205,20001,388,390,1,21001,389,0,2,21101,0,205,0,1106,0,393,1002,390,-1,390,1102,1,1,384,20102,1,388,1,20001,389,391,2,21102,1,228,0,1105,1,578,1206,1,261,1208,1,2,381,1006,381,253,20101,0,388,1,20001,389,391,2,21102,1,253,0,1105,1,393,1002,391,-1,391,1101,1,0,384,1005,384,161,20001,388,390,1,20001,389,391,2,21102,1,279,0,1105,1,578,1206,1,316,1208,1,2,381,1006,381,304,20001,388,390,1,20001,389,391,2,21101,0,304,0,1105,1,393,1002,390,-1,390,1002,391,-1,391,1102,1,1,384,1005,384,161,21001,388,0,1,20102,1,389,2,21102,1,0,3,21101,338,0,0,1105,1,549,1,388,390,388,1,389,391,389,20102,1,388,1,20102,1,389,2,21102,1,4,3,21101,0,365,0,1106,0,549,1007,389,22,381,1005,381,75,104,-1,104,0,104,0,99,0,1,0,0,0,0,0,0,315,20,18,1,1,22,109,3,22101,0,-2,1,21202,-1,1,2,21102,0,1,3,21101,0,414,0,1106,0,549,22102,1,-2,1,21202,-1,1,2,21101,429,0,0,1106,0,601,1202,1,1,435,1,386,0,386,104,-1,104,0,4,386,1001,387,-1,387,1005,387,451,99,109,-3,2105,1,0,109,8,22202,-7,-6,-3,22201,-3,-5,-3,21202,-4,64,-2,2207,-3,-2,381,1005,381,492,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,481,21202,-4,8,-2,2207,-3,-2,381,1005,381,518,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,507,2207,-3,-4,381,1005,381,540,21202,-4,-1,-1,22201,-3,-1,-3,2207,-3,-4,381,1006,381,529,21202,-3,1,-7,109,-8,2106,0,0,109,4,1202,-2,44,566,201,-3,566,566,101,639,566,566,2102,1,-1,0,204,-3,204,-2,204,-1,109,-4,2106,0,0,109,3,1202,-1,44,594,201,-2,594,594,101,639,594,594,20101,0,0,-2,109,-3,2106,0,0,109,3,22102,23,-2,1,22201,1,-1,1,21102,1,509,2,21101,264,0,3,21102,1012,1,4,21102,1,630,0,1106,0,456,21201,1,1651,-2,109,-3,2106,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,2,0,0,2,0,2,0,0,2,0,2,0,0,2,2,0,2,0,2,2,0,0,0,0,2,0,0,2,2,2,0,2,0,2,0,2,2,2,0,1,1,0,0,0,2,2,0,0,0,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,2,2,0,0,2,0,2,2,2,2,0,0,0,2,0,2,0,0,0,1,1,0,2,0,0,0,2,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,0,0,2,2,0,0,2,0,0,0,0,2,2,0,2,0,0,2,0,0,0,1,1,0,0,2,0,2,2,0,0,0,2,0,2,2,2,0,2,0,2,2,2,2,0,2,2,2,0,0,0,0,0,2,0,2,0,2,0,2,2,0,0,2,0,1,1,0,0,0,0,0,2,0,2,2,0,0,2,0,2,0,2,2,0,0,0,0,2,2,0,2,2,2,0,2,2,0,2,2,0,0,2,2,0,0,0,2,0,1,1,0,0,2,2,2,0,0,0,0,0,2,2,0,0,2,0,0,0,2,2,0,2,2,0,0,2,0,0,2,0,0,0,2,2,0,2,2,2,0,2,0,0,1,1,0,0,2,0,0,2,0,2,2,2,2,2,0,2,0,0,0,0,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,0,2,0,0,0,2,2,2,0,1,1,0,2,0,0,2,2,2,0,2,2,2,0,2,2,0,0,0,2,0,2,2,0,2,0,2,2,2,0,2,0,0,0,0,0,0,2,0,2,2,2,2,0,1,1,0,0,2,2,2,2,0,2,2,2,2,0,2,2,2,0,0,0,2,0,0,0,0,0,2,2,2,2,2,2,0,0,2,0,2,2,2,2,2,0,0,0,1,1,0,0,2,2,2,2,2,0,0,2,2,0,0,2,2,0,2,0,0,0,0,2,2,0,2,0,2,2,2,2,0,2,0,0,2,2,0,2,2,0,2,0,1,1,0,2,0,2,2,0,0,2,0,2,0,2,2,0,2,0,2,0,2,2,0,0,2,2,2,2,0,2,2,2,0,2,2,0,0,0,2,2,0,0,2,0,1,1,0,2,0,0,2,2,2,2,2,0,0,2,0,0,2,2,2,0,2,0,2,0,2,0,0,0,2,0,0,0,0,2,2,2,2,0,2,2,0,0,0,0,1,1,0,0,2,0,0,2,2,0,2,
0,2,0,2,2,0,0,2,0,2,0,0,2,2,2,2,2,2,0,0,0,0,2,2,2,2,2,0,2,0,2,2,0,1,1,0,0,0,2,2,2,2,2,2,2,0,0,2,0,0,0,0,2,0,2,2,2,2,0,2,0,2,0,0,2,0,2,2,2,2,0,0,0,2,2,2,0,1,1,0,0,0,2,2,0,2,0,2,2,0,2,0,2,0,0,0,2,0,2,0,2,2,0,2,0,0,2,0,0,2,0,2,0,2,2,2,0,0,2,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,78,97,10,89,31,40,53,97,63,60,92,10,54,27,53,42,36,34,79,30,8,70,22,20,18,67,79,30,81,50,67,46,39,15,72,26,35,61,6,36,2,26,65,94,82,27,37,6,71,66,84,19,69,5,62,89,57,49,1,9,59,67,30,74,71,37,66,77,43,4,59,42,85,4,87,1,24,64,85,25,29,67,97,15,22,6,34,97,97,47,22,19,40,89,45,36,93,77,26,85,30,40,65,21,45,91,18,77,45,13,74,18,47,67,79,1,31,22,1,96,94,60,44,56,79,64,74,56,91,79,41,23,9,57,9,86,63,82,55,92,63,63,94,73,76,40,88,18,26,66,29,27,20,1,94,90,43,11,67,33,27,47,34,73,65,67,77,54,92,84,6,29,41,8,8,38,83,36,74,29,26,70,68,57,54,38,75,37,24,64,30,89,43,61,6,4,65,81,39,85,91,22,28,17,47,95,52,40,76,77,81,52,59,19,37,90,23,33,5,82,3,64,46,70,22,24,9,96,97,69,48,66,58,97,51,15,86,6,23,7,35,52,57,3,79,82,71,87,64,91,93,69,77,95,1,57,5,2,65,35,57,14,35,12,14,60,45,52,67,32,26,93,63,54,45,8,48,83,5,49,95,60,78,98,54,62,9,1,39,57,63,82,52,90,64,38,95,8,12,72,22,53,78,63,72,65,59,1,87,95,81,79,38,92,61,60,59,3,39,31,47,69,70,6,55,44,49,54,49,50,11,87,85,89,15,70,58,5,87,65,79,86,92,98,49,73,8,79,30,55,4,30,11,55,80,28,63,28,33,9,49,70,34,83,29,97,67,65,89,50,88,29,40,5,3,11,87,85,43,2,51,18,58,39,81,8,15,2,42,95,64,8,76,60,73,67,30,28,11,84,56,73,14,66,43,21,40,31,48,11,65,27,9,37,60,91,34,11,83,45,9,77,70,97,9,13,68,20,17,15,6,13,44,59,51,91,73,60,37,40,18,69,48,14,44,96,71,21,27,90,9,91,14,80,38,69,69,52,28,15,54,63,46,32,78,54,79,95,83,16,44,29,26,92,31,51,66,14,94,49,1,93,43,57,50,82,45,95,83,74,50,87,47,55,62,31,1,88,1,77,59,64,26,48,22,61,56,20,54,59,62,3,59,28,98,45,53,47,72,73,72,43,30,23,94,10,76,63,63,8,30,92,25,61,61,32,64,25,57,61,95,81,23,67,28,59,48,68,21,85,48,32,93,98,50,89,27,46,38,63,38,87,76,76,10,71,36,91,2,47,2,36,37,90,25,97,27,71,67,77,4,11,57,68,87,94,12,83,91,94,92,35,49,46,4,31,64,39,12,92,26,12,75,29,11,5,83,8,23,73,62,74,55,75,38,40,90,73,71,38,15,75,10,38,55,74,82,13,32,55,90,47,6,25,65,88,85,40,13,66,54,39,82,19,15,18,74,19,54,70,30,56,28,2,20,50,44,51,7,4,79,97,90,71,97,5,25,95,22,36,61,30,16,68,61,23,22,60,93,9,92,98,40,41,11,47,7,57,15,51,51,77,22,32,4,27,10,76,76,50,81,96,46,28,38,69,41,43,47,86,66,54,22,33,45,75,75,51,37,62,62,25,71,35,49,93,44,18,92,39,32,11,31,96,2,33,94,45,14,82,57,79,81,57,6,19,63,35,11,55,18,38,22,43,82,76,35,7,21,74,50,83,7,55,94,23,79,85,20,4,65,18,12,62,35,74,23,20,96,71,25,95,45,95,4,18,82,71,79,4,12,41,44,23,8,86,6,78,5,54,68,60,12,73,18,95,31,86,23,5,36,40,97,35,48,28,15,9,27,54,14,22,97,63,41,37,12,20,38,41,27,70,35,10,89,31,90,44,46,44,49,66,71,58,74,7,24,6,96,68,27,16,89,80,1,38,26,88,60,47,27,46,32,34,44,74,51,70,13,57,14,31,40,71,55,22,87,23,9,37,38,18,17,34,84,84,49,74,81,31,4,45,11,71,89,16,56,91,61,61,67,92,14,88,89,10,11,77,38,40,89,76,7,5,74,54,64,97,25,20,1,41,9,41,97,1,31,21,96,98,88,52,71,25,62,42,8,91,84,43,75,37,22,32,58,87,22,6,13,62,48,85,81,48,70,3,13,93,88,52,
7,66,84,27,37,21,62,72,40,30,28,12,88,48,47,96,98,47,76,80,98,42,25,72,13,15,31,81,40,16,85,77,82,41,67,93,73,58,86,68,85,28,60,13,87,9,12,40,20,4,92,51,456801]
opcode_names = {
1: 'ADD',
2: 'MULT',
3: 'READ',
4: 'WRITE',
5: 'JMPEQ',
6: 'JMPNEQ',
7: 'LT',
8: 'EQ',
9: 'SET BASE',
99: 'EXIT'
}
class Instruction:
def __init__(self, program, insn):
self.program = program
self.raw = insn
self.modes = [0, 0, 0, 0]
if insn >= 100:
self.opcode = insn % 100
modes = int(insn / 100)
i = 0
while modes > 0:
self.modes[i] = modes % 10
modes = int(modes / 10)
i += 1
else:
self.opcode = insn
def extend(self, index):
while len(self.program.data) <= index:
self.program.data.append(0)
def get_arg(self, arg, mode_index):
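        # Intcode parameter modes: 0 = position (operand is an address),
        # 1 = immediate (operand is the value), 2 = relative (address is
        # the operand plus the current relative base).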
mode = self.modes[mode_index]
index = None
if mode == 0:
index = self.program.data[arg]
elif mode == 1:
index = arg
elif mode == 2:
index = self.program.data[arg] + self.program.base
else:
assert False
self.extend(index)
if args.verbose:
print(' getting at data[%d]=%d' % (index, self.program.data[index]))
return self.program.data[index]
def store(self, arg, mode_index, value):
mode = self.modes[mode_index]
        index = None
if mode == 0:
index = self.program.data[arg]
elif mode == 1:
assert False
elif mode == 2:
index = self.program.data[arg] + self.program.base
else:
assert False
self.extend(index)
self.program.data[index] = value
if args.verbose:
print(' storing at data[%d]=%d' % (index, value))
return index
def print(self):
print('pc: %d, base: %d, raw: %d, opcode: %d (%s), modes: %s' % (self.program.pc, self.program.base, self.raw, self.opcode, opcode_names[self.opcode], str(self.modes)))
class Program:
def __init__(self, data):
self.data = data.copy()
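        # Setting address 0 to 2 switches the arcade cabinet to free play
        # (day 13 part 2), so no quarters are needed.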
self.data[0] = 2
self.pc = 0
self.base = 0
self.output_values = []
self.last_modified = -1
self.exit = False
self.display = {}
self.paddlex = None
self.ballx = None
def print_data(self):
for i, v in enumerate(self.data):
if i == self.pc:
print(colored('PC:', 'blue'), end = '')
if i == self.last_modified:
print(colored(v, 'red'), end = '')
else:
print(v, end = '')
print(',', end = '')
print()
def process_one_instruction(self):
self.last_modified = -1
insn = Instruction(self, self.data[self.pc])
if args.verbose:
insn.print()
if insn.opcode == 1:
# ADD
r = insn.store(self.pc + 3, 2, insn.get_arg(self.pc + 1, 0) + insn.get_arg(self.pc + 2, 1))
self.pc += 4
self.last_modified = r
elif insn.opcode == 2:
# MULT
r = insn.store(self.pc + 3, 2, insn.get_arg(self.pc + 1, 0) * insn.get_arg(self.pc + 2, 1))
self.pc += 4
self.last_modified = r
elif insn.opcode == 3:
# READ
input_value = 0
if self.ballx < self.paddlex:
input_value = -1
if self.ballx > self.paddlex:
input_value = 1
# print('Setting intpu (%d/%d) to %d' % (self.ballx, self.paddlex, input_value))
# time.sleep(0.01)
r = insn.store(self.pc + 1, 0, input_value)
self.pc += 2
self.last_modified = r
elif insn.opcode == 4:
# WRITE
output = insn.get_arg(self.pc + 1, 0)
if args.verbose:
print(colored('Output: %d' % output, 'red'))
self.output_values.append(output)
self.pc += 2
elif insn.opcode == 5 or insn.opcode == 6:
# EQ/NEQ
cmp = insn.get_arg(self.pc + 1, 0)
if insn.opcode == 6:
cmp = not cmp
if cmp:
self.pc = insn.get_arg(self.pc + 2, 1)
else:
self.pc += 3
elif insn.opcode == 7:
# LT
r = int(insn.get_arg(self.pc + 1, 0) < insn.get_arg(self.pc + 2, 1))
r = insn.store(self.pc + 3, 2, r)
self.pc += 4
self.last_modified = r
elif insn.opcode == 8:
# EQ
r = int(insn.get_arg(self.pc + 1, 0) == insn.get_arg(self.pc + 2, 1))
r = insn.store(self.pc + 3, 2, r)
self.pc += 4
self.last_modified = r
elif insn.opcode == 9:
# SET BASE
self.base += insn.get_arg(self.pc + 1, 0)
self.pc += 2
elif insn.opcode == 99:
print('Exit')
self.exit = True
else:
print('Unknown opcode: %d' % insn.opcode)
assert False
def displayport(self):
for i in range(24):
for j in range(80):
v = ' '
t = (j, i)
                if t in self.display and self.display[t] > 0:  # use self, not the module-level 'program'
v = str(self.display[t])
if v == '1':
v = '*'
elif v == '2':
v = '#'
elif v == '3':
v = '='
elif v == '4':
v = 'o'
print(v, end = '')
print()
def run(self):
step = 0
self.print_data()
self.last_output = None
counter = 1
while True:
step += 1
if args.verbose:
print('Step %d:' % step)
r = self.process_one_instruction()
if args.verbose:
self.print_data()
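            # Day 13 output protocol: the program emits triples (x, y, tile);
            # the special triple (-1, 0, score) reports the current score.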
if len(self.output_values) == 3:
if self.output_values[0] == -1:
print('Score: %d' % self.output_values[2])
self.displayport()
else:
pos = tuple(self.output_values[:2])
val = self.output_values[2]
if not pos in self.display:
self.display[pos] = 0
self.display[pos] = val
if val == 3:
self.paddlex = self.output_values[0]
elif val == 4:
self.ballx = self.output_values[0]
self.output_values = []
if self.exit:
return self.last_output
program = Program(init_data)
program.run()
program.displayport()
| [
"[email protected]"
] | |
4d0575b00916108e551507643d17db31afd73b54 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/AlipayEbppPdeductSignAddRequest.py | 86e8b05744659d68438f3023124b0cc1e40d40f5 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 12,513 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppPdeductSignAddRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._agent_channel = None
self._agent_code = None
self._bill_key = None
self._biz_type = None
self._charge_inst = None
self._deduct_prod_code = None
self._deduct_type = None
self._ext_user_info = None
self._extend_field = None
self._notify_config = None
self._out_agreement_id = None
self._owner_name = None
self._pay_config = None
self._pay_password_token = None
self._pid = None
self._sign_expire_date = None
self._sub_biz_type = None
self._user_id = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def agent_channel(self):
return self._agent_channel
@agent_channel.setter
def agent_channel(self, value):
self._agent_channel = value
@property
def agent_code(self):
return self._agent_code
@agent_code.setter
def agent_code(self, value):
self._agent_code = value
@property
def bill_key(self):
return self._bill_key
@bill_key.setter
def bill_key(self, value):
self._bill_key = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def charge_inst(self):
return self._charge_inst
@charge_inst.setter
def charge_inst(self, value):
self._charge_inst = value
@property
def deduct_prod_code(self):
return self._deduct_prod_code
@deduct_prod_code.setter
def deduct_prod_code(self, value):
self._deduct_prod_code = value
@property
def deduct_type(self):
return self._deduct_type
@deduct_type.setter
def deduct_type(self, value):
self._deduct_type = value
@property
def ext_user_info(self):
return self._ext_user_info
@ext_user_info.setter
def ext_user_info(self, value):
self._ext_user_info = value
@property
def extend_field(self):
return self._extend_field
@extend_field.setter
def extend_field(self, value):
self._extend_field = value
@property
def notify_config(self):
return self._notify_config
@notify_config.setter
def notify_config(self, value):
self._notify_config = value
@property
def out_agreement_id(self):
return self._out_agreement_id
@out_agreement_id.setter
def out_agreement_id(self, value):
self._out_agreement_id = value
@property
def owner_name(self):
return self._owner_name
@owner_name.setter
def owner_name(self, value):
self._owner_name = value
@property
def pay_config(self):
return self._pay_config
@pay_config.setter
def pay_config(self, value):
self._pay_config = value
@property
def pay_password_token(self):
return self._pay_password_token
@pay_password_token.setter
def pay_password_token(self, value):
self._pay_password_token = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def sign_expire_date(self):
return self._sign_expire_date
@sign_expire_date.setter
def sign_expire_date(self, value):
self._sign_expire_date = value
@property
def sub_biz_type(self):
return self._sub_biz_type
@sub_biz_type.setter
def sub_biz_type(self, value):
self._sub_biz_type = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.pdeduct.sign.add'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.agent_channel:
if hasattr(self.agent_channel, 'to_alipay_dict'):
params['agent_channel'] = json.dumps(obj=self.agent_channel.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['agent_channel'] = self.agent_channel
if self.agent_code:
if hasattr(self.agent_code, 'to_alipay_dict'):
params['agent_code'] = json.dumps(obj=self.agent_code.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['agent_code'] = self.agent_code
if self.bill_key:
if hasattr(self.bill_key, 'to_alipay_dict'):
params['bill_key'] = json.dumps(obj=self.bill_key.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['bill_key'] = self.bill_key
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = json.dumps(obj=self.biz_type.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_type'] = self.biz_type
if self.charge_inst:
if hasattr(self.charge_inst, 'to_alipay_dict'):
params['charge_inst'] = json.dumps(obj=self.charge_inst.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['charge_inst'] = self.charge_inst
if self.deduct_prod_code:
if hasattr(self.deduct_prod_code, 'to_alipay_dict'):
params['deduct_prod_code'] = json.dumps(obj=self.deduct_prod_code.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['deduct_prod_code'] = self.deduct_prod_code
if self.deduct_type:
if hasattr(self.deduct_type, 'to_alipay_dict'):
params['deduct_type'] = json.dumps(obj=self.deduct_type.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['deduct_type'] = self.deduct_type
if self.ext_user_info:
if hasattr(self.ext_user_info, 'to_alipay_dict'):
params['ext_user_info'] = json.dumps(obj=self.ext_user_info.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['ext_user_info'] = self.ext_user_info
if self.extend_field:
if hasattr(self.extend_field, 'to_alipay_dict'):
params['extend_field'] = json.dumps(obj=self.extend_field.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['extend_field'] = self.extend_field
if self.notify_config:
if hasattr(self.notify_config, 'to_alipay_dict'):
params['notify_config'] = json.dumps(obj=self.notify_config.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['notify_config'] = self.notify_config
if self.out_agreement_id:
if hasattr(self.out_agreement_id, 'to_alipay_dict'):
params['out_agreement_id'] = json.dumps(obj=self.out_agreement_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['out_agreement_id'] = self.out_agreement_id
if self.owner_name:
if hasattr(self.owner_name, 'to_alipay_dict'):
params['owner_name'] = json.dumps(obj=self.owner_name.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['owner_name'] = self.owner_name
if self.pay_config:
if hasattr(self.pay_config, 'to_alipay_dict'):
params['pay_config'] = json.dumps(obj=self.pay_config.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['pay_config'] = self.pay_config
if self.pay_password_token:
if hasattr(self.pay_password_token, 'to_alipay_dict'):
params['pay_password_token'] = json.dumps(obj=self.pay_password_token.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['pay_password_token'] = self.pay_password_token
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = json.dumps(obj=self.pid.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['pid'] = self.pid
if self.sign_expire_date:
if hasattr(self.sign_expire_date, 'to_alipay_dict'):
params['sign_expire_date'] = json.dumps(obj=self.sign_expire_date.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['sign_expire_date'] = self.sign_expire_date
if self.sub_biz_type:
if hasattr(self.sub_biz_type, 'to_alipay_dict'):
params['sub_biz_type'] = json.dumps(obj=self.sub_biz_type.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['sub_biz_type'] = self.sub_biz_type
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = json.dumps(obj=self.user_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['user_id'] = self.user_id
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"[email protected]"
] | |
3f0a3b27cc1ee9e8a2ddfae7039cafc76d780269 | c31c38b567b5a5053e71d0112c069b2728f83582 | /setup.py | 034492e1e4ba614cc4cf8489fd443a60fa06ace1 | [] | no_license | TyberiusPrime/Dicodon_optimization | e79d301d4039b3fa4c3f22d8b5490c717bba4c79 | badeb4daff0984fb6d1481b854afd80d29329fb3 | refs/heads/master | 2022-11-06T11:06:44.893498 | 2020-06-25T08:42:00 | 2020-06-25T08:42:00 | 274,868,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
"""
Setup file for dicodon_usage.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| [
"[email protected]"
] | |
488cc25f3e057e4d4e5c2f5515b8c69b37e885ad | 3851985ce5793de321c8a6d7eacf889a5c4d89e4 | /aries_cloudagent/core/tests/test_plugin_registry.py | 92ec76a398807d161e315d9e6b3ed2472e3f3324 | [
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] | permissive | Nick-1979/aries-cloudagent-python | af812069c648f064e5fac05616b7b4910ab921ed | de322abbeaf16a7ca9769d31f9d030c0fb2846a5 | refs/heads/master | 2021-04-21T07:48:23.066245 | 2020-03-24T16:45:43 | 2020-03-24T16:45:43 | 249,762,233 | 1 | 0 | Apache-2.0 | 2020-03-24T16:40:53 | 2020-03-24T16:40:52 | null | UTF-8 | Python | false | false | 1,490 | py | from asynctest import TestCase as AsyncTestCase, mock as async_mock
from ...config.injection_context import InjectionContext
from ...utils.classloader import ClassLoader
from ..plugin_registry import PluginRegistry
class TestPluginRegistry(AsyncTestCase):
def setUp(self):
self.registry = PluginRegistry()
async def test_setup(self):
mod_name = "test_mod"
mod = async_mock.MagicMock()
mod.__name__ = mod_name
ctx = async_mock.MagicMock()
self.registry._plugins[mod_name] = mod
assert list(self.registry.plugin_names) == [mod_name]
assert list(self.registry.plugins) == [mod]
mod.setup = async_mock.CoroutineMock()
await self.registry.init_context(ctx)
mod.setup.assert_awaited_once_with(ctx)
async def test_register_routes(self):
mod_name = "test_mod"
mod = async_mock.MagicMock()
mod.__name__ = mod_name
app = async_mock.MagicMock()
self.registry._plugins[mod_name] = mod
mod.routes.register = async_mock.CoroutineMock()
with async_mock.patch.object(
ClassLoader, "load_module", async_mock.MagicMock(return_value=mod.routes),
) as load_module:
await self.registry.register_admin_routes(app)
load_module.assert_called_once_with(mod_name + ".routes")
mod.routes.register.assert_awaited_once_with(app)
def test_repr(self):
assert type(repr(self.registry)) is str
| [
"[email protected]"
] | |
d1d9d993c26ae3c749bd4f9afb49800aefeedb7b | c34d3dfeb068b1a8d7b017d352c91ec9401f115b | /experiment/tak/text2command/tutorial/venv/bin/ftfy | 060b4650d6eed19d0baefae8dbede2227b788770 | [] | no_license | HiroIshikawa/speech2craft | ef6c54359f513f5aefe801ae43f2588c8981d891 | 7717aed4942999780164b8c9060e1dc7bd502c1d | refs/heads/master | 2021-01-19T21:59:30.406605 | 2017-06-27T14:59:12 | 2017-06-27T14:59:12 | 88,735,488 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | #!/Users/hiro99ishikawa/Dropbox/school_projects/6spring2017/175/project/experiment/spacy_tutorial/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'ftfy==4.4.2','console_scripts','ftfy'
__requires__ = 'ftfy==4.4.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('ftfy==4.4.2', 'console_scripts', 'ftfy')()
)
| [
"[email protected]"
] | ||
5650104f810a270b0667179b4ddcce2860b9be30 | 72cc55e9599276203a99ba3e5dc211559b00ad76 | /book_liveblog/wsgi.py | 4c6fee4e22d83f50aa6c89a06c96105b42e32244 | [] | no_license | hanzhichao/django_liveblog | b551cfacc8e0991728aefddc10f54d5a20d0692c | 0a58ebcfb5fd102b7d83d025c6131aef81923a5c | refs/heads/master | 2021-09-05T22:52:03.262756 | 2018-01-31T13:49:57 | 2018-01-31T13:49:57 | 104,088,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for book_liveblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "book_liveblog.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
4d86d567418021ca56c5beb0432c631977639bc9 | f9d5f6e8e0021b03a3429605a059375a8d9193d1 | /package_from_installed.py | 7ef04fd8d2a902ffa3083ab2919a1a238e378d9b | [] | no_license | ftahmed/msvc2013onwine | a1d195e2828cce4117a95c325bddeb2b45b27443 | 996659d63be12696e092b40d946882a403d123e7 | refs/heads/master | 2021-01-17T22:18:46.268568 | 2015-05-12T20:27:42 | 2015-05-12T20:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,278 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
From a system-installed copy of the toolchain, packages all the required bits
into a .zip file.
It assumes default install locations for tools, in particular:
- C:\Program Files (x86)\Microsoft Visual Studio 12.0\...
- C:\Program Files (x86)\Windows Kits\8.1\...
1. Start from a fresh Win7 VM image.
2. Install VS Pro. Deselect everything except MFC.
3. Install Windows 8 SDK. Select only the Windows SDK and Debugging Tools for
Windows.
4. Run this script, which will build a <sha1>.zip.
Express is not yet supported by this script, but patches welcome (it's not too
useful as the resulting zip can't be redistributed, and most will presumably
have a Pro license anyway).
"""
import hashlib
import json
import os
import shutil
import sys
import tempfile
import zipfile
def BuildFileList():
result = []
# Subset of VS corresponding roughly to VC.
vs_path = r'C:\Program Files (x86)\Microsoft Visual Studio 12.0'
for path in [
'VC/atlmfc',
'VC/bin',
'VC/crt',
'VC/include',
'VC/lib',
'VC/redist',
('VC/redist/x86/Microsoft.VC120.CRT', 'sys32'),
('VC/redist/x86/Microsoft.VC120.MFC', 'sys32'),
('VC/redist/Debug_NonRedist/x86/Microsoft.VC120.DebugCRT', 'sys32'),
('VC/redist/Debug_NonRedist/x86/Microsoft.VC120.DebugMFC', 'sys32'),
('VC/redist/x64/Microsoft.VC120.CRT', 'sys64'),
('VC/redist/x64/Microsoft.VC120.MFC', 'sys64'),
('VC/redist/Debug_NonRedist/x64/Microsoft.VC120.DebugCRT', 'sys64'),
('VC/redist/Debug_NonRedist/x64/Microsoft.VC120.DebugMFC', 'sys64'),
]:
src = path[0] if isinstance(path, tuple) else path
combined = os.path.join(vs_path, src)
assert os.path.exists(combined) and os.path.isdir(combined)
for root, _, files in os.walk(combined):
for f in files:
final_from = os.path.normpath(os.path.join(root, f))
if isinstance(path, tuple):
result.append(
(final_from, os.path.normpath(os.path.join(path[1], f))))
else:
assert final_from.startswith(vs_path)
dest = final_from[len(vs_path) + 1:]
if dest.lower().endswith('\\xtree'):
# Patch for C4702 in xtree. http://crbug.com/346399.
(handle, patched) = tempfile.mkstemp()
with open(final_from, 'rb') as unpatched_f:
unpatched_contents = unpatched_f.read()
os.write(handle,
unpatched_contents.replace('warning(disable: 4127)',
'warning(disable: 4127 4702)'))
result.append((patched, dest))
else:
result.append((final_from, dest))
# Just copy the whole SDK.
sdk_path = r'C:\Program Files (x86)\Windows Kits\8.1'
for root, _, files in os.walk(sdk_path):
for f in files:
combined = os.path.normpath(os.path.join(root, f))
to = os.path.join('win8sdk', combined[len(sdk_path) + 1:])
result.append((combined, to))
# Generically drop all arm stuff that we don't need.
return [(f, t) for f, t in result if 'arm\\' not in f.lower()]
def GenerateSetEnvCmd(target_dir, pro):
"""Generate a batch file that gyp expects to exist to set up the compiler
environment.
This is normally generated by a full install of the SDK, but we
do it here manually since we do not do a full install."""
with open(os.path.join(
target_dir, r'win8sdk\bin\SetEnv.cmd'), 'w') as f:
f.write('@echo off\n'
':: Generated by win_toolchain\\toolchain2013.py.\n'
# Common to x86 and x64
'set PATH=%~dp0..\\..\\Common7\\IDE;%PATH%\n'
'set INCLUDE=%~dp0..\\..\\win8sdk\\Include\\um;'
'%~dp0..\\..\\win8sdk\\Include\\shared;'
'%~dp0..\\..\\VC\\include;'
'%~dp0..\\..\\VC\\atlmfc\\include\n'
'if "%1"=="/x64" goto x64\n')
# x86. If we're Pro, then use the amd64_x86 cross (we don't support x86
# host at all).
if pro:
f.write('set PATH=%~dp0..\\..\\win8sdk\\bin\\x86;'
'%~dp0..\\..\\VC\\bin\\amd64_x86;'
'%~dp0..\\..\\VC\\bin\\amd64;' # Needed for mspdb120.dll.
'%PATH%\n')
else:
f.write('set PATH=%~dp0..\\..\\win8sdk\\bin\\x86;'
'%~dp0..\\..\\VC\\bin;%PATH%\n')
f.write('set LIB=%~dp0..\\..\\VC\\lib;'
'%~dp0..\\..\\win8sdk\\Lib\\winv6.3\\um\\x86;'
'%~dp0..\\..\\VC\\atlmfc\\lib\n'
'goto :EOF\n')
# Express does not include a native 64 bit compiler, so we have to use
# the x86->x64 cross.
if not pro:
# x86->x64 cross.
f.write(':x64\n'
'set PATH=%~dp0..\\..\\win8sdk\\bin\\x64;'
'%~dp0..\\..\\VC\\bin\\x86_amd64;'
# Needed for mspdb120.dll. Must be after above though, so
# that cl.exe is the x86_amd64 one.
'%~dp0..\\..\\VC\\bin;'
'%PATH%\n')
else:
# x64 native.
f.write(':x64\n'
'set PATH=%~dp0..\\..\\win8sdk\\bin\\x64;'
'%~dp0..\\..\\VC\\bin\\amd64;'
'%PATH%\n')
f.write('set LIB=%~dp0..\\..\\VC\\lib\\amd64;'
'%~dp0..\\..\\win8sdk\\Lib\\winv6.3\\um\\x64;'
'%~dp0..\\..\\VC\\atlmfc\\lib\\amd64\n')
def AddEnvSetup(files):
"""We need to generate this file in the same way that the "from pieces"
script does, so pull that in here."""
tempdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tempdir, 'win8sdk', 'bin'))
GenerateSetEnvCmd(tempdir, True)
files.append((os.path.join(tempdir, 'win8sdk', 'bin', 'SetEnv.cmd'),
'win8sdk\\bin\\SetEnv.cmd'))
if sys.platform != 'cygwin':
import ctypes.wintypes
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
def IsHidden(file_path):
"""Returns whether the given |file_path| has the 'system' or 'hidden'
attribute set."""
p = GetFileAttributes(file_path)
assert p != 0xffffffff
return bool(p & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM))
def GetFileList(root):
"""Gets a normalized list of files under |root|."""
assert not os.path.isabs(root)
assert os.path.normpath(root) == root
file_list = []
for base, _, files in os.walk(root):
paths = [os.path.join(base, f) for f in files]
file_list.extend(x.lower() for x in paths if not IsHidden(x))
return sorted(file_list)
def MakeTimestampsFileName(root):
return os.path.join(root, '..', '.timestamps')
def CalculateHash(root):
"""Calculates the sha1 of the paths to all files in the given |root| and the
contents of those files, and returns as a hex string."""
file_list = GetFileList(root)
# Check whether we previously saved timestamps in $root/../.timestamps. If
# we didn't, or they don't match, then do the full calculation, otherwise
# return the saved value.
timestamps_file = MakeTimestampsFileName(root)
timestamps_data = {'files': [], 'sha1': ''}
if os.path.exists(timestamps_file):
with open(timestamps_file, 'rb') as f:
try:
timestamps_data = json.load(f)
except ValueError:
# json couldn't be loaded, empty data will force a re-hash.
pass
matches = len(file_list) == len(timestamps_data['files'])
if matches:
for disk, cached in zip(file_list, timestamps_data['files']):
if disk != cached[0] or os.stat(disk).st_mtime != cached[1]:
matches = False
break
if matches:
return timestamps_data['sha1']
digest = hashlib.sha1()
for path in file_list:
digest.update(path)
with open(path, 'rb') as f:
digest.update(f.read())
return digest.hexdigest()
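# Shape of the cached '.timestamps' file read above (a sketch; values are
# illustrative and the write side is not shown in this excerpt):
#   {"sha1": "<40 hex chars>",
#    "files": [["path\\to\\file", 1416868200.0], ...]}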
def RenameToSha1(output):
"""Determine the hash in the same way that the unzipper does to rename the
  .zip file."""
print 'Extracting to determine hash...'
tempdir = tempfile.mkdtemp()
old_dir = os.getcwd()
os.chdir(tempdir)
rel_dir = 'vs2013_files'
with zipfile.ZipFile(
os.path.join(old_dir, output), 'r', zipfile.ZIP_DEFLATED, True) as zf:
zf.extractall(rel_dir)
print 'Hashing...'
sha1 = CalculateHash(rel_dir)
os.chdir(old_dir)
shutil.rmtree(tempdir)
final_name = sha1 + '.zip'
os.rename(output, final_name)
print 'Renamed %s to %s.' % (output, final_name)
def main():
print 'Building file list...'
files = BuildFileList()
AddEnvSetup(files)
if False:
for f in files:
print f[0], '->', f[1]
return 0
output = 'out.zip'
if os.path.exists(output):
os.unlink(output)
count = 0
with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED, True) as zf:
for disk_name, archive_name in files:
sys.stdout.write('\r%d/%d ...%s' % (count, len(files), disk_name[-40:]))
sys.stdout.flush()
count += 1
zf.write(disk_name, archive_name)
sys.stdout.write('\rWrote to %s.%s\n' % (output, ' '*50))
sys.stdout.flush()
RenameToSha1(output)
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
498e9af00a73cf1f71cbf81f6d19da86078aa56d | 8613ec7f381a6683ae24b54fb2fb2ac24556ad0b | /70~79/ABC071/bnot.py | 9ea6b41f33667b764a8a5f3a347a202be7a65d15 | [] | no_license | Forest-Y/AtCoder | 787aa3c7dc4d999a71661465349428ba60eb2f16 | f97209da3743026920fb4a89fc0e4d42b3d5e277 | refs/heads/master | 2023-08-25T13:31:46.062197 | 2021-10-29T12:54:24 | 2021-10-29T12:54:24 | 301,642,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | s = input()
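# Print the first lowercase letter missing from s; print "None" if all of a-z appear.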
for i in range(26):
if s.count(chr(ord("a") + i)) == 0:
print(chr(ord("a") + i))
exit()
print("None") | [
"[email protected]"
] | |
d9acb6046dfbc3236d8937d16f0d309fed1f6164 | b06e21f2731fd1000fbe5694312aa9cebf543809 | /ElectronPhononCoupling/core/ddbfile.py | 935e1037cec2ce342aed7719a77cf2a39c0006d5 | [] | no_license | Maruf001/ElectronPhononCoupling | 932be124a58b1d49ebbdb5655ea7e30fcf9b6e20 | 434be79b20397bcc8ab4789f7fbde9de55deca77 | refs/heads/master | 2023-03-17T09:42:27.631568 | 2021-01-06T21:37:06 | 2021-01-06T21:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,980 | py |
from __future__ import print_function
__author__ = "Gabriel Antonius, Samuel Ponce"
import os
import warnings
import numpy as np
from numpy import zeros
import netCDF4 as nc
from .mpi import MPI, comm, size, rank, mpi_watch
from .constants import tol5, tol6, me_amu, kb_HaK
from .functions import get_bose
from . import EpcFile
__all__ = ['DdbFile']
class DdbFile(EpcFile):
_rprim = np.identity(3)
gprimd = np.identity(3)
omega = None
asr = True
def __init__(self, *args, **kwargs):
self.asr = kwargs.pop('asr', True)
super(DdbFile, self).__init__(*args, **kwargs)
def set_amu(self, amu):
"""
Set the values for the atom masses.
Arguments:
amu: [ntypat]
Atom masses for each atom type, in atomic mass units.
"""
self.amu = np.array(amu)
def read_nc(self, fname=None):
"""Open the DDB.nc file and read it."""
fname = fname if fname else self.fname
super(DdbFile, self).read_nc(fname)
with nc.Dataset(fname, 'r') as root:
self.natom = len(root.dimensions['number_of_atoms'])
self.ncart = len(root.dimensions['number_of_cartesian_directions']) # 3
self.ntypat = len(root.dimensions['number_of_atom_species'])
self.typat = root.variables['atom_species'][:self.natom]
self.amu = root.variables['atomic_masses_amu'][:self.ntypat]
self.rprim = root.variables['primitive_vectors'][:self.ncart,:self.ncart]
self.xred = root.variables['reduced_atom_positions'][:self.natom,:self.ncart]
self.qred = root.variables['q_point_reduced_coord'][:]
# The d2E/dRdR' matrix
self.E2D = np.zeros((self.natom, self.ncart, self.natom, self.ncart), dtype=np.complex)
self.E2D.real = root.variables['second_derivative_of_energy'][:,:,:,:,0]
self.E2D.imag = root.variables['second_derivative_of_energy'][:,:,:,:,1]
            self.E2D = np.einsum('aibj->bjai', self.E2D) # Indices are reversed when writing them from Fortran.
self.BECT = root.variables['born_effective_charge_tensor'][:self.ncart,:self.natom,:self.ncart]
def broadcast(self):
"""Broadcast the data from master to all workers."""
comm.Barrier()
if rank == 0:
dim = np.array([self.natom, self.ncart, self.ntypat], dtype=np.int)
else:
dim = np.empty(3, dtype=np.int)
comm.Bcast([dim, MPI.INT])
if rank != 0:
self.natom, self.ncart, self.ntypat = dim[:]
self.typat = np.empty(self.natom, dtype='i')
self.amu = np.empty(self.ntypat, dtype=np.float)
rprim = np.empty((self.ncart, self.ncart), dtype=np.float)
self.xred = np.empty((self.natom, self.ncart), dtype=np.float)
self.qred = np.empty((self.ncart), dtype=np.float)
self.E2D = np.empty((self.natom, self.ncart, self.natom, self.ncart),
dtype=np.complex)
self.BECT = np.empty((self.ncart, self.natom, self.ncart), dtype=np.float)
else:
rprim = self.rprim
comm.Bcast([self.typat, MPI.INT])
comm.Bcast([self.amu, MPI.DOUBLE])
comm.Bcast([self.xred, MPI.DOUBLE])
comm.Bcast([self.qred, MPI.DOUBLE])
comm.Bcast([self.E2D, MPI.COMPLEX])
comm.Bcast([self.BECT, MPI.DOUBLE])
comm.Bcast([rprim, MPI.DOUBLE])
self.rprim = rprim
@property
def is_gamma(self):
return np.allclose(self.qred,[0.0,0.0,0.0])
@property
def rprim(self):
return self._rprim
@rprim.setter
def rprim(self, value):
self._rprim = np.array(value)
self.gprimd = np.linalg.inv(np.matrix(self._rprim))
@property
def nmode(self):
return 3 * self.natom
def get_mass_scaled_dynmat_cart(self):
"""
Format the dynamical matrix in a 3Nx3N matrix,
scale with masses, and transform into Cartesian coordinates.
"""
        # Retrieve the amu for each atom
amu = zeros(self.natom)
for ii in np.arange(self.natom):
jj = self.typat[ii]
amu[ii] = self.amu[jj-1]
# Transform from 2nd-order matrix (non-cartesian coordinates,
# masses not included, asr not included ) from self to
# dynamical matrix, in cartesian coordinates, asr not imposed.
E2D_cart = zeros((3,self.natom,3,self.natom),dtype=complex)
for ii in np.arange(self.natom):
for jj in np.arange(self.natom):
for dir1 in np.arange(3):
for dir2 in np.arange(3):
for dir3 in np.arange(3):
for dir4 in np.arange(3):
E2D_cart[dir1,ii,dir2,jj] += (self.E2D[ii,dir3,jj,dir4] *
self.gprimd[dir1,dir3] * self.gprimd[dir2,dir4])
        # Reduce the 4-dimensional E2D_cart matrix to the 2-dimensional dynamical matrix
# with scaled masses.
Dyn_mat = zeros((3*self.natom,3*self.natom),dtype=complex)
for ii in np.arange(self.natom):
for dir1 in np.arange(3):
ipert1 = ii * 3 + dir1
for jj in np.arange(self.natom):
for dir2 in np.arange(3):
ipert2 = jj * 3 + dir2
Dyn_mat[ipert1,ipert2] = (E2D_cart[dir1,ii,dir2,jj] *
me_amu / np.sqrt(amu[ii]*amu[jj]))
# Hermitianize the dynamical matrix
#dynmat = np.matrix(Dyn_mat)
dynmat = Dyn_mat
dynmat = 0.5 * (dynmat + dynmat.transpose().conjugate())
return dynmat
def get_E2D_cart(self):
"""
Transform the 2nd-order matrix from non-cartesian coordinates
to cartesian coordinates...and also swap atom and cartesian indicies.
"""
# Transform from 2nd-order matrix (non-cartesian coordinates,
# masses not included, asr not included ) from self to
# dynamical matrix, in cartesian coordinates, asr not imposed.
E2D_cart = zeros((3,self.natom,3,self.natom),dtype=complex)
for ii in np.arange(self.natom):
for jj in np.arange(self.natom):
for dir1 in np.arange(3):
for dir2 in np.arange(3):
for dir3 in np.arange(3):
for dir4 in np.arange(3):
E2D_cart[dir1,ii,dir2,jj] += (self.E2D[ii,dir3,jj,dir4] *
self.gprimd[dir1,dir3] * self.gprimd[dir2,dir4])
return E2D_cart
def compute_dynmat(self, asr=None, zero_negative=True):
"""
Diagonalize the dynamical matrix.
Returns:
omega: the frequencies, in Ha
eigvect: the eigenvectors, in reduced coord
"""
asr = asr if asr is not None else self.asr
        # Retrieve the amu for each atom
amu = zeros(self.natom)
for ii in np.arange(self.natom):
jj = self.typat[ii]
amu[ii] = self.amu[jj-1]
dynmat = self.get_mass_scaled_dynmat_cart()
# Diagonalize the matrix
eigval, eigvect = np.linalg.eigh(dynmat)
# Scale the eigenvectors
for ii in np.arange(self.natom):
for dir1 in np.arange(3):
ipert = ii * 3 + dir1
eigvect[ipert] = eigvect[ipert] * np.sqrt(me_amu / amu[ii])
# Nullify imaginary frequencies
if zero_negative:
for i, eig in enumerate(eigval):
if eig < 0.0:
warnings.warn("An eigenvalue is negative with value: {} ... but proceed with value 0.0".format(eig))
eigval[i] = 0.0
        # Impose the acoustic sum rule
if asr and self.is_gamma:
eigval[0] = 0.0
eigval[1] = 0.0
eigval[2] = 0.0
# Frequencies
self.omega = np.sqrt(np.abs(eigval)) * np.sign(eigval)
self.eigvect = eigvect
return self.omega, self.eigvect
def get_reduced_displ(self, noscale=False):
"""
Compute the mode eigenvectors, scaled by the mode displacements
Also transform from cartesian to reduced coordinates.
Returns: polvec[nmode,3,natom]
"""
# Minimal value for omega (Ha)
omega_tolerance = 1e-5
self.polvec = zeros((self.nmode,3,self.natom), dtype=complex)
xi_at = zeros(3, dtype=complex)
omega, eigvect = self.compute_dynmat()
for imode in range(self.nmode):
# Skip mode with zero frequency (leave displacements null)
if omega[imode].real < omega_tolerance and not noscale:
continue
if noscale:
z0 = 1.
else:
z0 = 1. / np.sqrt(2.0 * omega[imode].real)
for iatom in np.arange(self.natom):
for idir in range(3):
xi_at[idir] = eigvect[3*iatom+idir,imode] * z0
for idir in range(3):
for jdir in range(3):
self.polvec[imode,idir,iatom] += xi_at[jdir] * self.gprimd[jdir,idir]
return self.polvec
def get_reduced_displ_squared(self):
"""
Compute the squared reduced displacements (scaled by phonon frequencies)
for the Fan and the DDW terms.
"""
# Minimal value for omega (Ha)
omega_tolerance = 1e-5
natom = self.natom
omega, eigvect = self.compute_dynmat()
displ_FAN = zeros((3,3), dtype=complex)
displ_DDW = zeros((3,3), dtype=complex)
displ_red_FAN2 = zeros((3*natom,natom,natom,3,3), dtype=complex)
displ_red_DDW2 = zeros((3*natom,natom,natom,3,3), dtype=complex)
for imode in np.arange(3*natom):
# Skip mode with zero frequency (leave displacements null)
if omega[imode].real < omega_tolerance:
continue
for iatom1 in np.arange(natom):
for iatom2 in np.arange(natom):
for idir1 in np.arange(0,3):
for idir2 in np.arange(0,3):
displ_FAN[idir1,idir2] = (
eigvect[3*iatom2+idir2,imode].conj() *
eigvect[3*iatom1+idir1,imode] / (2.0 * omega[imode].real)
)
displ_DDW[idir1,idir2] = (
eigvect[3*iatom2+idir2,imode].conj() *
eigvect[3*iatom2+idir1,imode] +
eigvect[3*iatom1+idir2,imode].conj() *
eigvect[3*iatom1+idir1,imode]) / (4.0 * omega[imode].real)
# Now switch to reduced coordinates in 2 steps (more efficient)
tmp_displ_FAN = zeros((3,3),dtype=complex)
tmp_displ_DDW = zeros((3,3),dtype=complex)
for idir1 in np.arange(3):
for idir2 in np.arange(3):
tmp_displ_FAN[:,idir1] = tmp_displ_FAN[:,idir1] + displ_FAN[:,idir2] * self.gprimd[idir2,idir1]
tmp_displ_DDW[:,idir1] = tmp_displ_DDW[:,idir1] + displ_DDW[:,idir2] * self.gprimd[idir2,idir1]
displ_red_FAN = zeros((3,3),dtype=complex)
displ_red_DDW = zeros((3,3),dtype=complex)
for idir1 in np.arange(3):
for idir2 in np.arange(3):
displ_red_FAN[idir1,:] = displ_red_FAN[idir1,:] + tmp_displ_FAN[idir2,:] * self.gprimd[idir2,idir1]
displ_red_DDW[idir1,:] = displ_red_DDW[idir1,:] + tmp_displ_DDW[idir2,:] * self.gprimd[idir2,idir1]
displ_red_FAN2[imode,iatom1,iatom2,:,:] = displ_red_FAN[:,:]
displ_red_DDW2[imode,iatom1,iatom2,:,:] = displ_red_DDW[:,:]
self.displ_red_FAN2 = displ_red_FAN2
self.displ_red_DDW2 = displ_red_DDW2
return displ_red_FAN2, displ_red_DDW2
def get_bose(self, temperatures):
"""
Get the Bose-Einstein occupations on a range of temperatures.
Returns: bose(3*natom, Ntemperatures)
"""
if self.omega is None:
self.compute_dynmat()
bose = zeros((3*self.natom, len(temperatures)))
#bose[:,:] = get_bose(self.omega, temperatures)
for imode, omega in enumerate(self.omega):
bose[imode,:] = get_bose(omega, temperatures)
return bose
def get_born_effective_charges_cart(self, gsr):
"""
Arguments
---------
gsr: GsrFile object
"""
zions = gsr.atoms_zion
Z_cart = np.zeros((self.natom, self.ncart, self.ncart), dtype=np.complex)
for iat in range(self.natom):
zion = zions[iat]
Z_cart[iat,:,:] = np.dot(self.rprim, np.dot(self.gprimd, self.BECT[:,iat,:])) / (2 * np.pi) + zion * np.identity(self.ncart)
return Z_cart
def get_born_effective_charges_mode(self, gsr):
r"""
Compute the Born effective charges in the mode basis,
that is, for each mode, a vector defined as
Z_{\nu,j} = \sum_{\kappa, j'} Z_{\kappa, j', j} \xi^{\nu}_{\kappa, j'}
gsr: GsrFile object
"""
Z_cart = self.get_born_effective_charges_cart(gsr)
Z_nu = np.zeros((self.nmode, self.ncart), dtype=np.complex)
omega, eigvect = self.compute_dynmat()
for imode in range(self.nmode):
for icart in range(self.ncart):
for jat in range(self.natom):
for jcart in range(self.ncart):
jpert = jat * 3 + jcart
Z_nu[imode,icart] += Z_cart[jat,jcart,icart] * eigvect[jpert,imode]
return Z_nu, omega
def get_born_effective_charges_mode_dot_q(self, gsr, q=None):
r"""
Compute the Born effective charges in the mode basis,
that is, for each mode, a vector defined as
Z_{\nu,j} = \sum_{\kappa, j'} Z_{\kappa, j', j} \xi^{\nu}_{\kappa, j'}
gsr: GsrFile object
        q: Direction of the wavevector in reduced coordinates.
            For q=Gamma, one has to pick a direction explicitly.
"""
Z_cart = self.get_born_effective_charges_cart(gsr)
Z_nu = np.zeros((self.nmode, self.ncart), dtype=np.complex)
omega, eigvect = self.compute_dynmat()
Z_nu, omega = self.get_born_effective_charges_mode(gsr)
if q is None:
q = self.qred
qnorm = np.linalg.norm(q)
if np.isclose(qnorm, 0):
qnorm = 1
            raise Exception('Cannot compute Z.q for q=0. Please choose a unitary direction for q')
qhat = np.array(q) / qnorm
Z_nu_q = np.zeros((self.nmode), dtype=np.complex)
for imode in range(self.nmode):
Z_nu_q[imode] = np.dot(Z_nu[imode,:], qhat)
return Z_nu_q, omega
# Only LEGACY code below --------------------------------------
#
# This old function reads the DDB from the ascii file.
# It is left here for legacy.
#
#def DDB_file_open(self, filefullpath):
# """Open the DDB file and read it."""
# if not (os.path.isfile(filefullpath)):
# raise Exception('The file "%s" does not exists!' %filefullpath)
# with open(filefullpath,'r') as DDB:
# Flag = 0
# Flag2 = False
# Flag3 = False
# ikpt = 0
# for line in DDB:
# if line.find('natom') > -1:
# self.natom = np.int(line.split()[1])
# if line.find('nkpt') > -1:
# self.nkpt = np.int(line.split()[1])
# self.kpt = zeros((self.nkpt,3))
# if line.find('ntypat') > -1:
# self.ntypat = np.int(line.split()[1])
# if line.find('nband') > -1:
# self.nband = np.int(line.split()[1])
# if line.find('acell') > -1:
# line = line.replace('D','E')
# tmp = line.split()
# self.acell = [np.float(tmp[1]),np.float(tmp[2]),np.float(tmp[3])]
# if Flag2:
# line = line.replace('D','E')
# for ii in np.arange(3,self.ntypat):
# self.amu[ii] = np.float(line.split()[ii-3])
# Flag2 = False
# if line.find('amu') > -1:
# line = line.replace('D','E')
# self.amu = zeros((self.ntypat))
# if self.ntypat > 3:
# for ii in np.arange(3):
# self.amu[ii] = np.float(line.split()[ii+1])
# Flag2 = True
# else:
# for ii in np.arange(self.ntypat):
# self.amu[ii] = np.float(line.split()[ii+1])
# if line.find(' kpt ') > -1:
# line = line.replace('D','E')
# tmp = line.split()
# self.kpt[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])]
# ikpt = 1
# continue
# if ikpt < self.nkpt and ikpt > 0:
# line = line.replace('D','E')
# tmp = line.split()
# self.kpt[ikpt,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]
# ikpt += 1
# continue
# if Flag == 2:
# line = line.replace('D','E')
# tmp = line.split()
# self.rprim[2,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]
# Flag = 0
# if Flag == 1:
# line = line.replace('D','E')
# tmp = line.split()
# self.rprim[1,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]
# Flag = 2
# if line.find('rprim') > -1:
# line = line.replace('D','E')
# tmp = line.split()
# self.rprim[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])]
# Flag = 1
# if Flag3:
# line = line.replace('D','E')
# for ii in np.arange(12,self.natom):
# self.typat[ii] = np.float(line.split()[ii-12])
# Flag3 = False
# if line.find(' typat') > -1:
# self.typat = zeros((self.natom))
# if self.natom > 12:
# for ii in np.arange(12):
# self.typat[ii] = np.float(line.split()[ii+1])
# Flag3 = True
# else:
# for ii in np.arange(self.natom):
# self.typat[ii] = np.float(line.split()[ii+1])
# # Read the actual d2E/dRdR matrix
# if Flag == 3:
# line = line.replace('D','E')
# tmp = line.split()
# if not tmp:
# break
# self.E2D[int(tmp[0])-1,int(tmp[1])-1,int(tmp[2])-1,int(tmp[3])-1] = \
# complex(float(tmp[4]),float(tmp[5]))
# # Read the current Q-point
# if line.find('qpt') > -1:
# line = line.replace('D','E')
# tmp = line.split()
# self.iqpt = [np.float(tmp[1]),np.float(tmp[2]),np.float(tmp[3])]
# Flag = 3
# self.E2D = zeros((3,self.natom,3,self.natom),dtype=complex)
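# Minimal usage sketch (the file name is a placeholder; whether EpcFile reads
# the file on construction is an assumption, so read_nc() is called explicitly):
#   ddb = DdbFile('out_DDB.nc', asr=True)
#   ddb.read_nc()
#   omega, eigvect = ddb.compute_dynmat()   # phonon frequencies (Ha) and eigenvectors
#   bose = ddb.get_bose([0., 150., 300.])   # Bose-Einstein occupations per mode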
| [
"[email protected]"
] | |
a27d5d8af270a654ded5ce5a4130f7bad3dffe0a | 2e9dbde82d0f9a215d30ee4a807cc5afe4ad848e | /clock/profiles/middleware.py | 1ef4e6c8eed256e32552917668cf7d25d7261ccc | [
"MIT"
] | permissive | mimischi/django-clock | b053788249ec47b23fc2f741be30b896f54ab149 | 3914da6a48b89cb80ab5205c6ce1c279012472fe | refs/heads/develop | 2020-04-04T06:00:47.981324 | 2018-05-27T12:29:05 | 2018-05-27T12:29:05 | 43,012,938 | 6 | 4 | MIT | 2018-05-27T12:29:06 | 2015-09-23T16:25:29 | Python | UTF-8 | Python | false | false | 1,219 | py | from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.middleware.locale import LocaleMiddleware
from django.utils import translation
from clock.profiles.models import UserProfile
class LocaleMiddlewareExtended(LocaleMiddleware):
"""This middleware extends Djangos normal LocaleMiddleware and looks for the
language preferred by the user.
Normally only the current session is searched for the preferred language,
but the user may want to define it in his profile. This solves the problem
and therefore keeps the set language across logouts/different devices.
"""
def get_language_for_user(self, request):
if request.user.is_authenticated:
try:
account = UserProfile.objects.get(user=request.user)
return account.language
except UserProfile.DoesNotExist:
pass
return translation.get_language_from_request(
request, check_path=is_language_prefix_patterns_used
)
def process_request(self, request):
language = self.get_language_for_user(request)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
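# Wiring sketch (an assumption: this entry replaces Django's LocaleMiddleware in
# the project's settings; the dotted path is illustrative):
#   MIDDLEWARE = [
#       ...,
#       'clock.profiles.middleware.LocaleMiddlewareExtended',
#       ...,
#   ]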
| [
"[email protected]"
] | |
107df7a0e5ce28dc0a41353af47e8273ee9ee549 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /work_or_world/right_group/work_or_part/young_life/world/new_thing_or_small_woman.py | 21ff767cbbc1678056687a4bb608c2f90f9a18ff | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |
#! /usr/bin/env python
def year_or_work(str_arg):
world(str_arg)
print('number')
def world(str_arg):
print(str_arg)
if __name__ == '__main__':
year_or_work('come_child')
| [
"[email protected]"
] | |
b2752676dc368bf1edfcca13a05dff4b1d09244d | 4ce2cff60ddbb9a3b6fc2850187c86f866091b13 | /tfrecords/src/wai/tfrecords/object_detection/core/batcher_test.py | 1e22a572cceb6031539e660c79130b2c7b99c527 | [
"Apache-2.0",
"MIT"
] | permissive | 8176135/tensorflow | 18cb8a0432ab2a0ea5bacd03309e647f39cb9dd0 | 2c3b4b1d66a80537f3e277d75ec1d4b43e894bf1 | refs/heads/master | 2020-11-26T05:00:56.213093 | 2019-12-19T08:13:44 | 2019-12-19T08:13:44 | 228,970,478 | 0 | 0 | null | 2019-12-19T03:51:38 | 2019-12-19T03:51:37 | null | UTF-8 | Python | false | false | 5,985 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.batcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow as tf
from wai.tfrecords.object_detection.core import batcher
slim = tf.contrib.slim
class BatcherTest(tf.test.TestCase):
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
boxes = tf.tile(
tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
batch_queue = batcher.BatchQueue(
tensor_dict={'boxes': boxes},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, 4], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(1, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 1)
image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([4, 3], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 1
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
def test_batcher_when_batch_size_is_one(self):
with self.test_session() as sess:
batch_size = 1
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
62e64dfe9d700d8ec7b1cead731218ebd10a44ea | bb970bbe151d7ac48d090d86fe1f02c6ed546f25 | /arouse/_dj/db/backends/oracle/compiler.py | a098adba42b387847e1a7ea80c74a94da3510a2b | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | thektulu/arouse | 95016b4028c2b8e9b35c5062a175ad04286703b6 | 97cadf9d17c14adf919660ab19771a17adc6bcea | refs/heads/master | 2021-01-13T12:51:15.888494 | 2017-01-09T21:43:32 | 2017-01-09T21:43:32 | 78,466,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | from arouse._dj.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"""
Creates the SQL for this query. Returns the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
if not do_offset:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
subquery=subquery,
)
else:
sql, params = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=True,
subquery=subquery,
)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
sql = (
'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
'"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
)
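            # Illustrative shape of the emulated query built above (angle-bracket
            # values are placeholders):
            #   SELECT * FROM (
            #       SELECT "_SUB".*, ROWNUM AS "_RN" FROM (<inner query>) "_SUB"
            #       WHERE ROWNUM <= <high_mark>
            #   ) WHERE "_RN" > <low_mark>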
return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
| [
"[email protected]"
] | |
7bf98c286a1ea5b67caef02f49147b68a2c65ed7 | 85b8a52f1be2c4838f885f0e5a4d6963f4109dfe | /codes_/0242_Valid_Anagram.py | 2e9af90a62a0b06ef6ba06c0cf8cb3861493ee08 | [
"MIT"
] | permissive | SaitoTsutomu/leetcode | 4cc5bac4f983b287ec1540d188589ce3dd6e409a | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | refs/heads/master | 2023-03-12T11:37:29.051395 | 2021-02-27T06:11:34 | 2021-02-27T06:11:34 | 281,815,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # %% [242. *Valid Anagram](https://leetcode.com/problems/valid-anagram/)
# Problem: return whether the two strings are anagrams of each other
# Approach: compare character counts with collections.Counter
import collections
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
return collections.Counter(s) == collections.Counter(t)
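# e.g. Solution().isAnagram("anagram", "nagaram") -> True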
| [
"[email protected]"
] | |
a9298b5a6ccd581226c2b508678daf7a0288d332 | d2c92cfe95a60a12660f1a10c0b952f0df3f0e8e | /zz91mobile/mobile/pingpp/certificate_blacklist.py | 420deff41b859b0b1c656b590b60a8b8b9d0878d | [] | no_license | snamper/zzpython | 71bf70ec3762289bda4bba80525c15a63156a3ae | 20415249fa930ccf66849abb5edca8ae41c81de6 | refs/heads/master | 2021-12-21T16:12:22.190085 | 2017-09-30T06:26:05 | 2017-09-30T06:26:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | import hashlib
from pingpp.error import APIError
BLACKLISTED_DIGESTS = [
'05c0b3643694470a888c6e7feb5c9e24e823dc531',
'5b7dc7fbc98d78bf76d4d4fa6f597a0c901fad5c'
]
def verify(certificate):
"""Verifies a PEM encoded certificate against a blacklist of known revoked
fingerprints.
    returns True on success, raises APIError on failure.
"""
sha = hashlib.sha1()
sha.update(certificate)
fingerprint = sha.hexdigest()
if fingerprint in BLACKLISTED_DIGESTS:
raise APIError("Invalid server certificate. You tried to "
"connect to a server that has a revoked "
"SSL certificate, which means we cannot "
"securely send data to that server. "
"Please email [email protected] if you "
"need help connecting to the correct API "
"server.")
return True
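# Usage sketch (the file name is a placeholder for a PEM-encoded certificate):
#   with open('server.pem', 'rb') as f:
#       verify(f.read())  # raises APIError if the SHA-1 fingerprint is blacklisted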
| [
"[email protected]"
] | |
9ea296767e1557de0a4f4913f45b4b01c53f2940 | 665add8c434df0445294931aac7098e8a0fa605b | /ch5/designer/connect.py | 2cc9044ec0e4fa25e176af549690aa94c514b07e | [] | no_license | swkim01/RaspberryPiWithIOT | f43cef567ca48f2ce9deec0cba87fa801dcbcbe2 | d4b5c9aeb09490429a551f357d3c83ab04deed82 | refs/heads/master | 2023-04-14T20:04:33.924243 | 2023-04-12T05:15:32 | 2023-04-12T05:15:32 | 48,477,439 | 4 | 14 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'connect.ui',
# licensing of 'connect.ui' applies.
#
# Created: Wed Aug 28 17:40:14 2019
# by: pyside2-uic running on PySide2 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(252, 150)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(80, 110, 161, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayoutWidget = QtWidgets.QWidget(Dialog)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 231, 101))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.server = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.server.setObjectName("server")
self.gridLayout.addWidget(self.server, 0, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.port = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.port.setObjectName("port")
self.gridLayout.addWidget(self.port, 1, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.name = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.name.setObjectName("name")
self.gridLayout.addWidget(self.name, 2, 1, 1, 1)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Connect", None, -1))
self.label.setText(QtWidgets.QApplication.translate("Dialog", "Server:", None, -1))
self.server.setText(QtWidgets.QApplication.translate("Dialog", "localhost", None, -1))
self.label_2.setText(QtWidgets.QApplication.translate("Dialog", "Port:", None, -1))
self.port.setText(QtWidgets.QApplication.translate("Dialog", "8080", None, -1))
self.label_3.setText(QtWidgets.QApplication.translate("Dialog", "Name:", None, -1))
self.name.setText(QtWidgets.QApplication.translate("Dialog", "홍길동", None, -1))
| [
"[email protected]"
] | |
3101f4b4d66bc96addf9fdcaea38f55c99999bca | e4638ff152796e13f5d176c3aa303246bc57fced | /ontask/migrations/0033_auto_20180829_0940.py | 9cee4f91ea8ff26b9f7ad6702ef99420218c36f6 | [
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | LucasFranciscoCorreia/ontask_b | 8989ec86905d308e929b6149b52b942321be2311 | 5473e9faa24c71a2a1102d47ebc2cbf27608e42a | refs/heads/master | 2020-07-25T16:52:00.173780 | 2019-09-13T23:31:28 | 2019-09-13T23:31:28 | 208,359,655 | 0 | 0 | MIT | 2019-09-13T23:00:53 | 2019-09-13T23:00:52 | null | UTF-8 | Python | false | false | 414 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-29 00:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ontask', '0032_auto_20180829_0939'),
]
operations = [
migrations.AlterUniqueTogether(
name='scheduledaction',
unique_together=set([]),
),
]
| [
"[email protected]"
] | |
fe3662dcd8f362491af0c4761067ffe9eed642c7 | 9f0f5816b9d810c9ce01c56588024e1c804809fe | /study/day3/day3_3.py | 9fc5cd1c0a6e16afeeab8578680ad7c92ea6c11c | [] | no_license | parkwisdom/Python-Study-step1 | bf8cc8c5f89bfb9ccbb395a3827e23d4f0d6ae9a | bae2f5653c5a0d1eac1d4b89476ece7e0802d33b | refs/heads/master | 2020-04-03T13:49:58.990930 | 2018-10-30T00:37:29 | 2018-10-30T00:37:29 | 155,300,210 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | import re
# re.compile('\\white')#\white
pat = re.compile('Java|Python')
res=pat.match('Python')
print(res)
res=pat.match('Java')
print(res)
res=pat.match('PythonJava')
print(res)
res=pat.match('PythonRuby')
print(res)
res=pat.match('RubyPython')
print(res)
print(re.search('How','How are you'))
print(re.search('are','How are you'))
print(re.match('are','How are you'))
print(re.search('^How','How are you'))
print(re.search('^are','How are you'))
print(re.search('you$','How are you'))
print(re.search('you$','How are you.Hi'))
pat = re.compile('(ABC)+')  # case where "ABC" repeats one or more times
res=pat.search('ABCABCABCABCABCABCDABCD ok?')
print(res)
# print(res.group(1))
pat=re.compile('(\w+)\s+((\d+)[-](\d+)[-](\d+))')
res=pat.search("kim 010-1234-5678")
print(res.group(1))
print(res.group(2))
print(res.group(3))
print(res.group(4))
print(res.group(5))
pat=re.compile('(어제|오늘|내일)')
print(pat.sub('DAY','어제 날씨 그리고 오늘 날씨'))
"""
Web text scraping -> substitution/removal (sub) -> morphological analyzer (split into morphemes, 8 parts of speech) -> e.g. 오늘 뉴스 사건 사고 (today's news: incidents and accidents)
{'오늘':5,'뉴스':1,'사건':10,....}
"""
pat=re.compile('(어제|오늘|내일)')
print(pat.sub('DAY','어제 날씨 그리고 오늘 날씨',count=1))
pat=re.compile("(?P<name>\w+)\s+(?P<phone>(\d+)[-](\d+)[-](\d+))")
print(pat.sub("\g<phone> \g<name>","kim 010-1234-5678"))
print(pat.sub("\g<2> \g<1>","kim 010-1234-5678"))
"""
Regex example : meaning
^Test : strings that start with Test
test$ : strings that end with test
^xyz$ : strings that start and end with xyz (xyz itself matches)
abc : strings that contain abc
ab* : a followed by zero or more b's (a, ab, abbbbbb)
ab+ : a followed by one or more b's (ab, abbbbb)
ab? : b may appear once or not at all (ab, a)
a?b+$ : an optional a, then the string must end with one or more b's
ab{2} : a followed by exactly two b's (abb)
ab{3,} : a followed by three or more b's (abbb, abbbbbb)
ab{2,4} : a followed by two to four b's (abb, abbb, abbbb)
a(bc)* : a followed by zero or more repetitions of bc
a(bc){1,3}: a followed by one to three repetitions of bc
hi|bye : strings that contain hi or bye
(a|bc)de : the strings ade or bcde
(a|b)*c : a's and b's mixed zero or more times, followed by c (aababaaac)
. : any one character
.. : any two characters
... : any three characters
a.[0-9] : a, then any single character, then a digit
^.{3}$ : exactly three characters
[ab] : a or b (same as a|b)
[a-d] : lowercase a through d (same as a|b|c|d or [abcd])
^[a-zA-Z]: strings that start with a letter
[0-9]% : a single digit right before the % character
[a-zA-Z0-9]$ : strings that end with a digit or a letter
XML: eXtensible (markup language)
""" | [
"[email protected]"
] | |
7e25a96700cf91cac44a590dab054dbb0d049013 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_twinned.py | d2947990b7968c1468d068757b1b53885272e675 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._twin import _TWIN
# class header
class _TWINNED(_TWIN, ):
def __init__(self,):
_TWIN.__init__(self)
self.name = "TWINNED"
self.specie = 'verbs'
self.basic = "twin"
self.jsondata = {}
| [
"[email protected]"
] | |
47c2c3d706674d36d2e15fa9fd99cbd6aa7e8634 | 0179aa7d1a6ea99f6191e596e23a4ac900de979f | /gcp/evaluation/dtw_utils.py | b9ef712e91a591cb92dc3c1385638cac8c4430ed | [] | no_license | Douxation/video-gcp | 562aa10caf68fe6fe0bd53a5989e95746de3c93c | 4608a543fe60c550363de864be7a38c4f663836a | refs/heads/master | 2022-12-04T15:44:23.105262 | 2020-07-10T19:48:20 | 2020-07-10T19:48:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,889 | py | import numpy as np
import torch
from scipy.spatial.distance import cdist
from math import isinf
try:
import gcp.evaluation.cutils as cutils
except ImportError:
pass
def dtw_dist(x, y, dist=None, warp=1, w=np.inf, s=1.0):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
:param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
:param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
r, c = len(x), len(y)
D1 = np.zeros((r, c))
for i in range(r):
for j in range(c):
# if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):
D1[i, j] = dist(x[i], y[j])
return dtw(D1, warp, w, s)
def dtw(inp_D0, warp=1, w=np.inf, s=1.0):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param int warp: how many shifts are computed.
:param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
:param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
r, c = inp_D0.shape
assert w >= abs(r - c)
assert s > 0
if not isinf(w):
D0 = np.full((r + 1, c + 1), np.inf)
for i in range(1, r + 1):
D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0
D0[0, 0] = 0
else:
D0 = np.zeros((r + 1, c + 1))
D0[0, 1:] = np.inf
D0[1:, 0] = np.inf
D1 = D0[1:, 1:] # view
D0[1:, 1:] = inp_D0 # TODO to support w, this needs to be modified to mask the assignment.
C = D1.copy()
jrange = range(c)
for i in range(r):
if not isinf(w):
jrange = range(max(0, i - w), min(c, i + w + 1))
for j in jrange:
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r)
j_k = min(j + k, c)
min_list += [D0[i_k, j] * s, D0[i, j_k] * s]
D1[i, j] += min(min_list)
if r == 1:
path = np.zeros(c), range(c)
elif c == 1:
path = range(r), np.zeros(r)
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
def basic_dtw(C):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
:param C: the cost matrix
:return:
"""
r, c = C.shape
D = np.zeros((r + 1, c + 1))
D[0, 1:] = np.inf
D[1:, 0] = np.inf
D[1:, 1:] = C
for i in range(r):
for j in range(c):
candidates = [D[i, j], D[i + 1, j], D[i, j + 1]]
D[i + 1, j + 1] += min(candidates)
path = _traceback(D)
return D[-1, -1] / (r + c), D[1:, 1:], path
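# Tiny worked example for basic_dtw (values illustrative):
#   C = np.array([[0., 1.], [1., 0.]])
#   d, acc, path = basic_dtw(C)  # d == 0.0, path == (array([0, 1]), array([0, 1]))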
def c_dtw(C):
"""
Computes Dynamic Time Warping (DTW) of two sequences efficiently in C.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
:param C: the cost matrix
:return:
"""
r, c = C.shape
D = np.zeros((r + 1, c + 1))
D[0, 1:] = np.inf
D[1:, 0] = np.inf
D[1:, 1:] = C
cutils.min_cumsum(D)
path = _traceback(D)
return D[-1, -1] / (r + c), D[1:, 1:], path
def batched_dtw(C, end_ind):
b, r, c = C.shape
D = np.zeros((b, r + 1, c + 1))
D[:, 0, 1:] = np.inf
D[:, 1:, 0] = np.inf
D[:, 1:, 1:] = C
for i in range(r):
for j in range(c):
candidates = [D[:, i, j], D[:, i + 1, j], D[:, i, j + 1]]
D[:, i + 1, j + 1] += np.min(np.stack(candidates), axis=0)
paths, path_lengths = _batched_traceback(D, end_ind)
return D[np.arange(b), -1, end_ind+1] / (r + end_ind+1), D[:, 1:, 1:], paths, path_lengths
def torch_dtw(C, end_ind):
b, r, c = C.shape
D = torch.zeros((b, r + 1, c + 1))
D[:, 0, 1:] = torch.Tensor([float("Inf")])
D[:, 1:, 0] = torch.Tensor([float("Inf")])
D[:, 1:, 1:] = C
for i in range(r):
for j in range(c):
candidates = [D[:, i, j], D[:, i + 1, j], D[:, i, j + 1]]
D[:, i + 1, j + 1].add_(torch.min(torch.stack(candidates), dim=0).values)
paths, path_lengths = _torched_traceback(D, end_ind)
return D[torch.arange(b), -1, (end_ind.float()+1).long()] / (r + end_ind.float()+1), D[:, 1:, 1:], paths, path_lengths
def accelerated_dtw(x, y, dist=None, inp_D0=None, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
assert len(x)
assert len(y)
if np.ndim(x) == 1:
x = x.reshape(-1, 1)
if np.ndim(y) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D0 = np.zeros((r + 1, c + 1))
D0[0, 1:] = np.inf
D0[1:, 0] = np.inf
D1 = D0[1:, 1:]
if inp_D0 is not None:
D0[1:, 1:] = inp_D0
else:
D0[1:, 1:] = cdist(x, y, dist)
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
min_list += [D0[min(i + k, r), j],
D0[i, min(j + k, c)]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = np.zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), np.zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
def _traceback(D):
# TODO I suspect this doesn't work with fancy stuff (w, s, warp)
i, j = np.array(D.shape) - 2
p, q = [i], [j]
while (i > 0) or (j > 0):
tb = np.argmin((D[i, j], D[i, j + 1], D[i + 1, j]))
if tb == 0:
i -= 1
j -= 1
elif tb == 1:
i -= 1
elif tb == 2:
j -= 1
else:
raise ValueError
p.insert(0, i)
q.insert(0, j)
return np.array(p), np.array(q)
def _batched_traceback(D, end_ind):
b, r, c = D.shape
i, j = np.asarray(np.ones((b,)) * (r - 2), dtype=int), end_ind
p, q = [i.copy()], [j.copy()]
path_lengths = np.zeros_like(i)
cnt = 0
while (i > 0).any() or (j > 0).any():
cnt += 1
path_lengths[(i == 0) & (j == 0) & (path_lengths == 0)] = cnt
tb = np.argmin(np.stack((D[np.arange(b), i, j], D[np.arange(b), i, j + 1], D[np.arange(b), i + 1, j])), axis=0)
i[(tb == 0) & (i > 0)] -= 1
j[(tb == 0) & (j > 0)] -= 1
i[(tb == 1) & (i > 0)] -= 1
j[(tb == 2) & (j > 0)] -= 1
p.insert(0, i.copy())
q.insert(0, j.copy())
return (np.array(p), np.array(q)), path_lengths
def _torched_traceback(D, end_ind):
b, r, c = D.shape
i, j = (torch.ones((b,)) * (r - 2)).long(), end_ind
p, q = [i.clone()], [j.clone()]
path_lengths = torch.zeros_like(i)
cnt = 0
while (i > 0).any() or (j > 0).any():
cnt += 1
path_lengths[(i == 0) & (j == 0) & (path_lengths == 0)] = cnt
tb = torch.argmin(
torch.stack((D[torch.arange(b), i, j], D[torch.arange(b), i, j + 1], D[torch.arange(b), i + 1, j])), dim=0)
i[(tb == 0) & (i > 0)] -= 1
j[(tb == 0) & (j > 0)] -= 1
i[(tb == 1) & (i > 0)] -= 1
j[(tb == 2) & (j > 0)] -= 1
p.insert(0, i.clone())
q.insert(0, j.clone())
return (torch.stack(p), torch.stack(q)), path_lengths
if __name__ == "__main__":
b, r, c = 8, 1024, 1000
min_length = int(c - 1)
EPS = 1e-5
import numpy as np
import time
np.random.seed(40)
DD = np.random.rand(b, r, c)
end_ind = min_length + np.asarray(np.random.rand(b) * (c - min_length - 1), dtype=int)
dd, dd2, pp, pp2, t1, t2 = [], [], [], [], 0.0, 0.0
for D, i in zip(DD, end_ind):
s = time.time()
d, cost_matrix, acc_cost_matrix, path = dtw(D[:, :i+1])
t1 += time.time() - s
dd.append(d); pp.append(path)
s = time.time()
d2, acc_cost_matrix_2, path_2 = c_dtw(D[:, :i+1])
t2 += time.time() - s
dd2.append(d2); pp2.append(path_2)
print("DTW: {}".format(t1))
print("C DTW: {}".format(t2))
def check(cond, name):
print("{}: PASS".format(name)) if cond else print("{}: FAIL".format(name))
check(not np.any((np.array(dd) - dd2) > EPS), "Distance")
check(not np.any(np.concatenate([(np.array(pp[i][0]) - np.array(pp2[i][0])) > EPS for i in range(b)])) and \
not np.any(np.concatenate([(np.array(pp[i][1]) - np.array(pp2[i][1])) > EPS for i in range(b)])), "Paths")
| [
"[email protected]"
] | |
6b87f507408b3fc904e01f2243e6137866d10c72 | f7258525ad6c311a138a82fc59c4d84e318cc30f | /book1/aa.py | ea38e13a6b2d90fbb8e0d06ce860a7287256fa16 | [] | no_license | lianzhang132/book | e358ae555de96e36dbf9ac6c1f7f887444d91e81 | 71ed81a6464997c77dd75b4849ef6eecf7a2e075 | refs/heads/master | 2020-07-11T03:18:07.352745 | 2019-08-26T08:45:16 | 2019-08-26T08:45:16 | 204,434,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | from django.db import models
class Auther(models.Model):
    nid = models.AutoField(primary_key=True)
    name = models.CharField(max_length=32)
    age = models.IntegerField()
    authorinfo = models.OneToOneField(to="Auther_info", on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Auther_info(models.Model):
    nid = models.AutoField(primary_key=True)
    birthday = models.DateField()
    age = models.IntegerField()
    telephone = models.BigIntegerField()
    addr = models.CharField(max_length=86)
class Public_info(models.Model):
    nid = models.AutoField(primary_key=True)
    name = models.CharField(max_length=32)
    city = models.CharField(max_length=32)
    email = models.CharField(max_length=32)
    def __str__(self):
        return self.name
class Book(models.Model):
    nid = models.AutoField(primary_key=True)
    title = models.CharField(max_length=32)
    pub_date = models.DateField()
    price = models.DecimalField(max_digits=8, decimal_places=2)
    # one-to-many: each book points at one publisher
    public = models.ForeignKey(to="Public_info", to_field="nid", on_delete=models.CASCADE)
    # many-to-many: a book can have several authors
    authers = models.ManyToManyField(to="Auther")
    def __str__(self):
        return self.title
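# Usage sketch (field values are illustrative):
#   pub = Public_info.objects.create(name='APress', city='Seoul', email='[email protected]')
#   book = Book.objects.create(title='Django', pub_date='2019-01-01', price=29.90, public=pub)
#   book.authers.add(auther)  # link an existing Auther row through the many-to-many table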
| [
"[email protected]"
] | |
7b64769558d2289317314543419e07fade2628ca | 155ab3ed394669fa552e6bcb47ad9ae62519c028 | /Arcade/Python/34 - multiplicationTable.py | efe69edc398e1e10b349046b3ff45dd73382cdb0 | [] | no_license | rbiegelmeyer/CodeFights-Python | e3336b9249c55b0bd44ca81917ee38557ab06adc | e0e63f14677db9741a0531fdc8aa6e44d2155f33 | refs/heads/master | 2021-09-28T22:05:07.442470 | 2018-11-20T21:10:45 | 2018-11-20T21:10:45 | 108,603,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | def multiplicationTable(n):
return [[(x+1)*(y+1) for x in range(n)] for y in range(n)]
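# e.g. multiplicationTable(3) -> [[1, 2, 3], [2, 4, 6], [3, 6, 9]]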
| [
"[email protected]"
] | |
c565ec43d09ff8f46b60ef4c43cf2f2aab5929a4 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/chromite/cbuildbot/builders/generic_builders.py | 1f47b675cd72b18f023d22c575debb47b20797c0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 12,874 | py | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the generic builders."""
from __future__ import print_function
import multiprocessing
import os
import sys
import tempfile
import traceback
from chromite.cbuildbot import constants
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import results_lib
from chromite.cbuildbot import trybot_patch_pool
from chromite.cbuildbot.stages import build_stages
from chromite.cbuildbot.stages import report_stages
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import cidb
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import parallel
class Builder(object):
"""Parent class for all builder types.
This class functions as an abstract parent class for various build types.
Its intended use is builder_instance.Run().
Attributes:
_run: The BuilderRun object for this run.
archive_stages: Dict of BuildConfig keys to ArchiveStage values.
patch_pool: TrybotPatchPool.
"""
def __init__(self, builder_run):
"""Initializes instance variables. Must be called by all subclasses."""
self._run = builder_run
# TODO: all the fields below should not be part of the generic builder.
# We need to restructure our SimpleBuilder and see about creating a new
# base in there for holding them.
if self._run.config.chromeos_official:
os.environ['CHROMEOS_OFFICIAL'] = '1'
self.archive_stages = {}
self.patch_pool = trybot_patch_pool.TrybotPatchPool()
self._build_image_lock = multiprocessing.Lock()
def Initialize(self):
"""Runs through the initialization steps of an actual build."""
if self._run.options.resume:
results_lib.LoadCheckpoint(self._run.buildroot)
self._RunStage(report_stages.BuildStartStage)
self._RunStage(build_stages.CleanUpStage)
def _GetStageInstance(self, stage, *args, **kwargs):
"""Helper function to get a stage instance given the args.
Useful as almost all stages just take in builder_run.
"""
# Normally the default BuilderRun (self._run) is used, but it can
# be overridden with "builder_run" kwargs (e.g. for child configs).
builder_run = kwargs.pop('builder_run', self._run)
return stage(builder_run, *args, **kwargs)
def _SetReleaseTag(self):
"""Sets run.attrs.release_tag from the manifest manager used in sync.
Must be run after sync stage as syncing enables us to have a release tag,
and must be run before any usage of attrs.release_tag.
TODO(mtennant): Find a bottleneck place in syncing that can set this
directly. Be careful, as there are several kinds of syncing stages, and
sync stages have been known to abort with sys.exit calls.
"""
manifest_manager = getattr(self._run.attrs, 'manifest_manager', None)
if manifest_manager:
self._run.attrs.release_tag = manifest_manager.current_version
else:
self._run.attrs.release_tag = None
logging.debug('Saved release_tag value for run: %r',
self._run.attrs.release_tag)
def _RunStage(self, stage, *args, **kwargs):
"""Wrapper to run a stage.
Args:
stage: A BuilderStage class.
args: args to pass to stage constructor.
kwargs: kwargs to pass to stage constructor.
Returns:
Whatever the stage's Run method returns.
"""
stage_instance = self._GetStageInstance(stage, *args, **kwargs)
return stage_instance.Run()
@staticmethod
def _RunParallelStages(stage_objs):
"""Run the specified stages in parallel.
Args:
stage_objs: BuilderStage objects.
"""
steps = [stage.Run for stage in stage_objs]
try:
parallel.RunParallelSteps(steps)
except BaseException as ex:
logging.error('BaseException in _RunParallelStages %s' % ex,
exc_info=True)
# If a stage threw an exception, it might not have correctly reported
# results (e.g. because it was killed before it could report the
# results.) In this case, attribute the exception to any stages that
# didn't report back correctly (if any).
for stage in stage_objs:
for name in stage.GetStageNames():
if not results_lib.Results.StageHasResults(name):
results_lib.Results.Record(name, ex, str(ex))
if cidb.CIDBConnectionFactory.IsCIDBSetup():
db = cidb.CIDBConnectionFactory.GetCIDBConnectionForBuilder()
if db:
for stage in stage_objs:
for build_stage_id in stage.GetBuildStageIDs():
if not db.HasBuildStageFailed(build_stage_id):
failures_lib.ReportStageFailureToCIDB(db,
build_stage_id,
ex)
raise
def _RunSyncStage(self, sync_instance):
"""Run given |sync_instance| stage and be sure attrs.release_tag set."""
try:
sync_instance.Run()
finally:
self._SetReleaseTag()
def SetVersionInfo(self):
"""Sync the builder's version info with the buildbot runtime."""
self._run.attrs.version_info = self.GetVersionInfo()
def GetVersionInfo(self):
"""Returns a manifest_version.VersionInfo object for this build.
Chrome OS Subclasses must override this method. Site specific builds which
don't use Chrome OS versioning should leave this alone.
"""
# Placeholder version for non-Chrome OS builds.
return manifest_version.VersionInfo('1.0.0')
def GetSyncInstance(self):
"""Returns an instance of a SyncStage that should be run.
Subclasses must override this method.
"""
raise NotImplementedError()
def GetCompletionInstance(self):
"""Returns the MasterSlaveSyncCompletionStage for this build.
Subclasses may override this method.
Returns:
None
"""
return None
def RunStages(self):
"""Subclasses must override this method. Runs the appropriate code."""
raise NotImplementedError()
def _ReExecuteInBuildroot(self, sync_instance):
"""Reexecutes self in buildroot and returns True if build succeeds.
This allows the buildbot code to test itself when changes are patched for
buildbot-related code. This is a no-op if the buildroot == buildroot
of the running chromite checkout.
Args:
sync_instance: Instance of the sync stage that was run to sync.
Returns:
True if the Build succeeded.
"""
if not self._run.options.resume:
results_lib.WriteCheckpoint(self._run.options.buildroot)
args = sync_stages.BootstrapStage.FilterArgsForTargetCbuildbot(
self._run.options.buildroot, constants.PATH_TO_CBUILDBOT,
self._run.options)
# Specify a buildroot explicitly (just in case, for local trybot).
# Suppress any timeout options given from the commandline in the
# invoked cbuildbot; our timeout will enforce it instead.
args += ['--resume', '--timeout', '0', '--notee', '--nocgroups',
'--buildroot', os.path.abspath(self._run.options.buildroot)]
# Set --version. Note that --version isn't legal without --buildbot.
if (self._run.options.buildbot and
hasattr(self._run.attrs, 'manifest_manager')):
ver = self._run.attrs.manifest_manager.current_version
args += ['--version', ver]
pool = getattr(sync_instance, 'pool', None)
if pool:
filename = os.path.join(self._run.options.buildroot,
'validation_pool.dump')
pool.Save(filename)
args += ['--validation_pool', filename]
# Reset the cache dir so that the child will calculate it automatically.
if not self._run.options.cache_dir_specified:
commandline.BaseParser.ConfigureCacheDir(None)
with tempfile.NamedTemporaryFile(prefix='metadata') as metadata_file:
metadata_file.write(self._run.attrs.metadata.GetJSON())
metadata_file.flush()
args += ['--metadata_dump', metadata_file.name]
# Re-run the command in the buildroot.
# Finally, be generous and give the invoked cbuildbot 30s to shutdown
# when something occurs. It should exit quicker, but the sigterm may
# hit while the system is particularly busy.
return_obj = cros_build_lib.RunCommand(
args, cwd=self._run.options.buildroot, error_code_ok=True,
kill_timeout=30)
return return_obj.returncode == 0
def _InitializeTrybotPatchPool(self):
"""Generate patch pool from patches specified on the command line.
Do this only if we need to patch changes later on.
"""
changes_stage = sync_stages.PatchChangesStage.StageNamePrefix()
check_func = results_lib.Results.PreviouslyCompletedRecord
if not check_func(changes_stage) or self._run.options.bootstrap:
options = self._run.options
self.patch_pool = trybot_patch_pool.TrybotPatchPool.FromOptions(
gerrit_patches=options.gerrit_patches,
local_patches=options.local_patches,
sourceroot=options.sourceroot,
remote_patches=options.remote_patches)
def _GetBootstrapStage(self):
"""Constructs and returns the BootStrapStage object.
We return None when there are no chromite patches to test, and
--test-bootstrap wasn't passed in.
"""
stage = None
patches_needed = sync_stages.BootstrapStage.BootstrapPatchesNeeded(
self._run, self.patch_pool)
chromite_branch = git.GetChromiteTrackingBranch()
if (patches_needed or
self._run.options.test_bootstrap or
chromite_branch != self._run.options.branch):
stage = sync_stages.BootstrapStage(self._run, self.patch_pool)
return stage
def Run(self):
"""Main runner for this builder class. Runs build and prints summary.
Returns:
Whether the build succeeded.
"""
self._InitializeTrybotPatchPool()
if self._run.options.bootstrap:
bootstrap_stage = self._GetBootstrapStage()
if bootstrap_stage:
# BootstrapStage blocks on re-execution of cbuildbot.
bootstrap_stage.Run()
return bootstrap_stage.returncode == 0
print_report = True
exception_thrown = False
success = True
sync_instance = None
try:
self.Initialize()
sync_instance = self.GetSyncInstance()
self._RunSyncStage(sync_instance)
if self._run.ShouldPatchAfterSync():
# Filter out patches to manifest, since PatchChangesStage can't handle
# them. Manifest patches are patched in the BootstrapStage.
non_manifest_patches = self.patch_pool.FilterManifest(negate=True)
if non_manifest_patches:
self._RunStage(sync_stages.PatchChangesStage, non_manifest_patches)
# Now that we have a fully synced & patched tree, we can let the builder
# extract version information from the sources for this particular build.
self.SetVersionInfo()
if self._run.ShouldReexecAfterSync():
print_report = False
success = self._ReExecuteInBuildroot(sync_instance)
else:
self._RunStage(report_stages.BuildReexecutionFinishedStage)
self._RunStage(report_stages.ConfigDumpStage)
self.RunStages()
except Exception as ex:
if isinstance(ex, failures_lib.ExitEarlyException):
# One stage finished and exited early, not a failure.
raise
exception_thrown = True
build_id, db = self._run.GetCIDBHandle()
if results_lib.Results.BuildSucceededSoFar(db, build_id):
# If the build is marked as successful, but threw exceptions, that's a
# problem. Print the traceback for debugging.
if isinstance(ex, failures_lib.CompoundFailure):
print(str(ex))
traceback.print_exc(file=sys.stdout)
raise
if not (print_report and isinstance(ex, failures_lib.StepFailure)):
# If the failed build threw a non-StepFailure exception, we
# should raise it.
raise
finally:
if print_report:
results_lib.WriteCheckpoint(self._run.options.buildroot)
completion_instance = self.GetCompletionInstance()
self._RunStage(report_stages.ReportStage, completion_instance)
build_id, db = self._run.GetCIDBHandle()
success = results_lib.Results.BuildSucceededSoFar(db, build_id)
if exception_thrown and success:
success = False
logging.PrintBuildbotStepWarnings()
print("""\
Exception thrown, but all stages marked successful. This is an internal error,
because the stage that threw the exception should be marked as failing.""")
return success
| [
"[email protected]"
] | |
17d6a1d1e5388c6b85ab8a475b79ab605c31a328 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /html_parsing/get_population_from_wikidata.py | b298fdfe997103ec021047c1272812e4fb2d6d89 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
from bs4 import BeautifulSoup
def get_populations(url: str) -> dict:
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
# P1082 -- идентификатор для population
population_node = root.select_one('#P1082')
populations = dict()
# Перебор строк в соседнем от population столбце
for row in population_node.select('.wikibase-statementview'):
# Небольшая хитрость -- берем только первые 2 значения, поидеи это будут: количество людей и дата
number_str, data_str = row.select('.wikibase-snakview-value')[:2]
# Вытаскиваем текст из
number_str = number_str.text.strip()
data_str = data_str.text.strip()
        # Split, take the last part, and convert it to a number
# "1 July 2012" -> 2012, "2010" -> 2010
year = int(data_str.split()[-1])
        # Add to the dictionary
populations[year] = number_str
return populations
def get_population_by_year(populations: dict, year: int) -> str:
    # Return -1 if the year is not found
return populations.get(year, -1)
# Like get_population_by_year, but first fetches the data from the given
# url and then looks up the value for the year
def get_population_from_url_by_year(url: str, year: int) -> str:
populations = get_populations(url)
return get_population_by_year(populations, year)
if __name__ == '__main__':
url = 'https://www.wikidata.org/wiki/Q148'
populations = get_populations(url)
print(populations) # {2012: '1,375,198,619', 2010: '1,359,755,102', 2015: '1,397,028,553', ...
    # Print the data sorted by key, in ascending order
for year in sorted(populations):
print("{}: {}".format(year, populations[year]))
# 2010: 1,359,755,102
# 2011: 1,367,480,264
# 2012: 1,375,198,619
# 2013: 1,382,793,212
# 2014: 1,390,110,388
# 2015: 1,397,028,553
# 2016: 1,403,500,365
# 2017: 1,409,517,397
print(get_population_by_year(populations, 2012)) # 1,375,198,619
print(get_population_by_year(populations, 2013)) # 1,382,793,212
print(get_population_by_year(populations, 2014)) # 1,390,110,388
| [
"[email protected]"
] | |
457f651d068dae1e551d1eb386df946c6f285aa0 | 200bcbebf6e4009abe2807cd5d844d655fb431c2 | /ch16/ex_1.py | 4a5bec19fa2e81e6d802f1d059478721dc42b9d3 | [] | no_license | SHUHAIB-AREEKKAN/think_python_solutions | 7b022fd7e19e87495c2c9722e0b1516d4fa83b85 | bf1996d922574c367ea49e9954940662166c46b2 | refs/heads/master | 2021-01-25T08:07:19.865147 | 2017-06-08T07:43:05 | 2017-06-08T07:43:05 | 93,716,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
class Distance:
    """x is the start point and y is the end point."""
    def distance_between_points(self):
        print("distance between: " + str(self.y - self.x))


finder = Distance()
finder.x = 10
finder.y = 30
finder.distance_between_points()
| [
"[email protected]"
] | |
c51427dac75e8302202610302e6adea7783b101a | 1e5cbe7d3085a5406c3bf4c0dd3c64ec08005e19 | /p017.py | b6a8c9aab5a2ed049721f5f7a8d9ce1d7b608766 | [] | no_license | mingrammer/project-euler | 0dfdd0ba83592c49003cb54708e2c520de27f6ac | 4ae57ac9279472c68a27efc50f6d0317f9b73f17 | refs/heads/master | 2021-01-21T16:38:28.983077 | 2018-09-26T10:24:21 | 2018-09-26T10:24:21 | 38,953,014 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # Number letter counts
one_digit = [3, 3, 5, 4, 4, 3, 5, 5, 4]
ten_to_twenty = [3, 6, 6, 8, 8, 7, 7, 9, 8, 8]
two_digit = [6, 6, 5, 5, 5, 7, 6, 6]
hundred = 7
thousand = 8
and_letter = 3
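# Count letters band by band:
#   1-9 once each; 10-19 once each; 20-99 (each tens word appears 10 times,
#   each units word 8 times); 100-999 ("X hundred" -> each X 100 times and
#   "hundred" 900 times, "and" in 99 of each 100, plus nine copies of the
#   1-99 letter total); finally "one thousand".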
sum_len = 0
sum_len += sum(one_digit)
sum_len += sum(ten_to_twenty)
sum_len += 10*sum(two_digit) + 8*sum(one_digit)
sum_len += 100*sum(one_digit) + 900*hundred + 9*99*and_letter + 9*(sum(ten_to_twenty) + 10*sum(two_digit) + 9*sum(one_digit))
sum_len += thousand + one_digit[0]
print(sum_len)
| [
"[email protected]"
] | |
410bb29639ebcc51003884b5c567fb3451c74f12 | 3dcc44bf8acd3c6484b57578d8c5595d8119648d | /MOVED_TO_ROSETTA_TOOLS/pdb_util/read_pdb.py | acd9665f3b78b4dcff882cfab945127df48531b7 | [] | no_license | rhiju/rhiju_python | f0cab4dfd4dd75b72570db057a48e3d65e1d92c6 | eeab0750fb50a3078a698d190615ad6684dc2411 | refs/heads/master | 2022-10-29T01:59:51.848906 | 2022-10-04T21:28:41 | 2022-10-04T21:28:41 | 8,864,938 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/python
# quick and dirty PDB reader
def read_pdb( filename ):
coords = {}
pdb_lines = {}
sequence = {}
for line in open( filename ):
if (len(line)>54 and (line[0:4] == 'ATOM' or line[0:4] == 'HETA' ) ):
resnum = int( line[22:26] )
chain = line[21]
atom_name = line[12:16]
position = [float(line[30:38]),float(line[38:46]),float(line[46:54])]
if not ( chain in coords.keys() ):
coords[chain] = {}
pdb_lines[chain] = {}
sequence[ chain ] = {}
sequence[chain][resnum] = line[17:20]
if not ( resnum in coords[chain].keys() ):
coords[chain][resnum] = {}
pdb_lines[chain][resnum] = {}
coords[chain][resnum][atom_name] = position
pdb_lines[chain][resnum][atom_name] = line[:-1]
return ( coords, pdb_lines, sequence )
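if __name__ == '__main__':
    import sys
    # Minimal usage sketch; pass a PDB file path as the first argument.
    coords, pdb_lines, sequence = read_pdb(sys.argv[1])
    for chain in sorted(coords):
        first = sorted(coords[chain])[0]
        # atom names keep their PDB column padding, e.g. ' CA ' for C-alpha
        print(chain, first, sequence[chain][first], coords[chain][first].get(' CA '))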
| [
"[email protected]"
] | |
bd06eae999b50905759544ab6ba8529547dfa5a9 | 6e466f7432de5f0b66a72583bc33bf0c96120cd4 | /userprofiles/views.py | 2985d63d975451dcb8e22f6676228a78da2b4370 | [] | no_license | Venezolanos/cines-unidos | c9ecc471f1d972af0a3dde89b979e3b8426e712c | e9f5f3b69d9a87098cb9ebf8e392677f111f7f51 | refs/heads/master | 2020-05-29T14:40:30.130621 | 2016-08-25T19:58:53 | 2016-08-25T19:58:53 | 65,574,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from django.shortcuts import render
from django.views.generic import CreateView
from .models import UserProfile
from .forms import UserForm  # assumed: a UserForm ModelForm in userprofiles/forms.py
class UserCreateView(CreateView):
model = UserProfile
template_name = 'userprofiles/user_create.html'
success_url = '/'
    form_class = UserForm
def form_valid(self, form):
form.save()
        return super(UserCreateView, self).form_valid(form)
"[email protected]"
] | |
9a6e984e05f946fb47176fd1e463fb0fbbe1f739 | 8168895c50924ce7f2df0570c490cc4c9cea3619 | /jupytext/cli.py | a1555c3dd4e7c4583ae59f99c151a3128ffb1ede | [
"MIT"
] | permissive | GapData/jupytext | fe146bc59ab1cfe623840fad5a900b70dc84d05b | ae943a9a701090fe16c69e0e87b784f833a2ea14 | refs/heads/master | 2020-04-01T18:27:54.812349 | 2018-10-16T21:13:14 | 2018-10-16T21:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,838 | py | """Command line conversion tools `jupytext` and `nbsrc`
"""
import os
import sys
import argparse
from .jupytext import readf, reads, writef, writes
from .formats import NOTEBOOK_EXTENSIONS, JUPYTEXT_FORMATS, check_file_version, one_format_as_string, parse_one_format
from .combine import combine_inputs_with_outputs
from .compare import test_round_trip_conversion, NotebookDifference
from .languages import _SCRIPT_EXTENSIONS
def convert_notebook_files(nb_files, fmt, input_format=None, output=None,
test_round_trip=False, test_round_trip_strict=False, stop_on_first_error=True,
update=True):
"""
Export R markdown notebooks, python or R scripts, or Jupyter notebooks,
to the opposite format
:param nb_files: one or more notebooks files
:param input_format: input format, e.g. "py:percent"
:param fmt: destination format, e.g. "py:percent"
:param output: None, destination file, or '-' for stdout
:param test_round_trip: should round trip conversion be tested?
:param test_round_trip_strict: should round trip conversion be tested, with strict notebook comparison?
:param stop_on_first_error: when testing, should we stop on first error, or compare the full notebook?
:param update: preserve the current outputs of .ipynb file
when possible
:return:
"""
ext, format_name = parse_one_format(fmt)
if ext not in NOTEBOOK_EXTENSIONS:
raise TypeError('Destination extension {} is not a notebook'.format(ext))
if not nb_files:
if not input_format:
raise ValueError('Reading notebook from the standard input requires the --from field.')
parse_one_format(input_format)
nb_files = [sys.stdin]
if len(nb_files) > 1 and output:
raise ValueError("Output argument can only be used with a single notebook")
notebooks_in_error = 0
for nb_file in nb_files:
if nb_file == sys.stdin:
dest = None
current_ext, _ = parse_one_format(input_format)
notebook = reads(nb_file.read(), ext=current_ext, format_name=format_name)
else:
dest, current_ext = os.path.splitext(nb_file)
notebook = None
if current_ext not in NOTEBOOK_EXTENSIONS:
raise TypeError('File {} is not a notebook'.format(nb_file))
if input_format:
format_ext, format_name = parse_one_format(input_format)
if current_ext != format_ext:
raise ValueError("Format extension in --from field '{}' is "
"not consistent with notebook extension "
"'{}'".format(format_name, current_ext))
else:
input_format = None
if not notebook:
notebook = readf(nb_file, format_name=format_name)
if test_round_trip or test_round_trip_strict:
try:
test_round_trip_conversion(notebook, ext, format_name, update,
allow_expected_differences=not test_round_trip_strict,
stop_on_first_error=stop_on_first_error)
except NotebookDifference as error:
notebooks_in_error += 1
print('{}: {}'.format(nb_file, str(error)))
continue
if output == '-':
sys.stdout.write(writes(notebook, ext=ext, format_name=format_name))
continue
if output:
dest, dest_ext = os.path.splitext(output)
if dest_ext != ext:
                raise TypeError('Destination extension {} is not consistent '
                                'with format {}'.format(dest_ext, ext))
save_notebook_as(notebook, nb_file, dest + ext, format_name, update)
if notebooks_in_error:
exit(notebooks_in_error)
def save_notebook_as(notebook, nb_file, nb_dest, format_name, combine):
"""Save notebook to file, in desired format"""
if combine and os.path.isfile(nb_dest) and \
os.path.splitext(nb_dest)[1] == '.ipynb':
check_file_version(notebook, nb_file, nb_dest)
nb_outputs = readf(nb_dest)
combine_inputs_with_outputs(notebook, nb_outputs)
writef(notebook, nb_dest, format_name=format_name)
def canonize_format(format_or_ext, file_path=None):
"""Return the canonical form of the format"""
if not format_or_ext:
if file_path and file_path != '-':
_, ext = os.path.splitext(file_path)
if ext not in NOTEBOOK_EXTENSIONS:
raise TypeError('Output extensions should be in {}'.format(", ".join(NOTEBOOK_EXTENSIONS)))
return ext.replace('.', '')
        raise ValueError('Please specify either --to or --output')
if '.' + format_or_ext in NOTEBOOK_EXTENSIONS:
return format_or_ext
if ':' in format_or_ext:
return format_or_ext
for ext in _SCRIPT_EXTENSIONS:
if _SCRIPT_EXTENSIONS[ext]['language'] == format_or_ext:
return ext.replace('.', '')
return {'notebook': 'ipynb', 'markdown': 'md', 'rmarkdown': 'Rmd'}[format_or_ext]
def cli_jupytext(args=None):
"""Command line parser for jupytext"""
parser = argparse.ArgumentParser(
description='Jupyter notebooks as markdown documents, '
'Julia, Python or R scripts')
notebook_formats = (['notebook', 'rmarkdown', 'markdown'] +
[_SCRIPT_EXTENSIONS[ext]['language'] for ext in _SCRIPT_EXTENSIONS] +
[ext.replace('.', '') for ext in NOTEBOOK_EXTENSIONS] +
[one_format_as_string(fmt.extension, fmt.format_name)
for fmt in JUPYTEXT_FORMATS])
parser.add_argument('--to',
choices=notebook_formats,
help="Destination format")
parser.add_argument('--from',
dest='input_format',
choices=notebook_formats,
help="Input format")
parser.add_argument('notebooks',
help='One or more notebook(s) to be converted. Input '
'is read from stdin when no notebook is '
'provided , but then the --from field is '
'mandatory',
nargs='*')
parser.add_argument('-o', '--output',
help='Destination file. Defaults to original file, '
'with extension changed to destination format. '
"Use '-' for printing the notebook on stdout.")
parser.add_argument('--update', action='store_true',
help='Preserve outputs of .ipynb destination '
'(when file exists and inputs match)')
test = parser.add_mutually_exclusive_group()
test.add_argument('--test', dest='test', action='store_true',
help='Test that notebook is stable under '
'round trip conversion, up to expected changes')
test.add_argument('--test-strict', dest='test_strict', action='store_true',
help='Test that notebook is strictly stable under '
'round trip conversion')
parser.add_argument('-x', '--stop', dest='stop_on_first_error', action='store_true',
help='Stop on first round trip conversion error, and report stack traceback')
args = parser.parse_args(args)
args.to = canonize_format(args.to, args.output)
if args.input_format:
args.input_format = canonize_format(args.input_format)
if not args.notebooks and not args.output:
args.output = '-'
if not args.input_format:
if not args.notebooks:
            raise ValueError('Please specify either --from or notebooks')
if args.update and not (args.test or args.test_strict) and args.to != 'ipynb':
raise ValueError('--update works exclusively with --to notebook ')
return args
def jupytext(args=None):
"""Entry point for the jupytext script"""
try:
args = cli_jupytext(args)
convert_notebook_files(nb_files=args.notebooks,
fmt=args.to,
input_format=args.input_format,
output=args.output,
test_round_trip=args.test,
test_round_trip_strict=args.test_strict,
stop_on_first_error=args.stop_on_first_error,
update=args.update)
except ValueError as err: # (ValueError, TypeError, IOError) as err:
print('jupytext: error: ' + str(err))
exit(1)
| [
"[email protected]"
] | |
49be057ff2047be2b384ad0122012c526a1548b1 | afabd1f1778d4911c825409501f215c634319f0b | /src/python/wordalign.py | 3c3e198d3fff84aee4424baca774edd1fa2272e8 | [] | no_license | texttheater/xlci | 030b275a8ecf2bea640c9f9b70aa3fb9fbc768df | 702d3e90b2a97f56b98da62cd072f0988fcfa5a7 | refs/heads/master | 2021-12-24T09:40:10.803445 | 2021-12-22T13:54:49 | 2021-12-22T13:54:49 | 173,109,877 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,162 | py | #!/usr/bin/env python3
"""Converts (M)GIZA++ output to a token-offset-based format.
Input example:
# Sentence pair (1) source length 8 target length 5 alignment score : 0.000275724
tom is obviously scared .
NULL ({ 2 }) jak ({ }) je ({ }) vidět ({ 3 }) , ({ }) tom ({ 1 }) má ({ }) strach ({ 4 }) . ({ 5 })
Assuming the following raw texts:
Jak je vidět, Tom má strach. Tom is obviously scared.
Output example:
0 0 4,6
0 3
4 6
7 12 7,16
12 13
14 17 0,3
18 20
21 27 17,23
27 28 23,24
"""
import re
import sys
import util
# Matches a GIZA++ alignment line and extracts the bits between ({ and }).
ALIGN_PATTERN = re.compile(r'(?<=\(\{) ((?:\d+ )*)(?=}\))')
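# For the sample alignment line in the module docstring, findall returns one
# captured group per "({ ... })" token, e.g.:
#   ['2 ', '', '', '3 ', '', '1 ', '', '4 ', '5 ']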
def trgid_list2english_offsets(trgid_list, english_sentence):
english_offsets = []
for trgid in trgid_list:
eng_from = english_sentence[trgid - 1][0]
eng_to = english_sentence[trgid - 1][1]
english_offsets.append((eng_from, eng_to))
english_offsets = ['{},{}'.format(*p) for p in english_offsets]
english_offsets = ' '.join(english_offsets)
return english_offsets
def read_offset_file(path):
"""Returns a list of lists of offset pairs."""
result = []
with open(path) as f:
for block in util.blocks(f):
result.append([])
for line in block.splitlines():
if line.rstrip():
fr, to, tokid, token = line.split(maxsplit=3)
fr = int(fr)
to = int(to)
result[-1].append((fr, to))
return result
def read_dict_file(path, nbest_out):
"""Returns a list of pairs (eng_token_count, eng_id_lists)."""
result = []
old_sentence_number = 0
with open(path) as f:
for comment_line, eng_line, alignment_line in util.chunk(3, f):
assert comment_line.startswith('# Sentence pair (')
index = comment_line.index(')')
sentence_number = int(comment_line[17:index])
if sentence_number != old_sentence_number:
assert sentence_number == old_sentence_number + 1
if sentence_number > 1:
result.append(sentence_alignments)
sentence_alignments = []
if len(sentence_alignments) < nbest_out:
old_sentence_number = sentence_number
eng_token_count = len(eng_line.split())
eng_id_lists = [[int(i) for i in l.split()] for l
in ALIGN_PATTERN.findall(alignment_line)]
sentence_alignments.append((eng_token_count, eng_id_lists))
result.append(sentence_alignments)
return result
if __name__ == '__main__':
try:
_, dict_path, engoff_path, foroff_path, nbest_out = sys.argv
nbest_out = int(nbest_out)
except ValueError:
print('USAGE (example): python3 wordalign.py nld-eng.dict eng.tok.off nld.tok.off 3',
file=sys.stderr)
sys.exit(1)
dict_data = read_dict_file(dict_path, nbest_out)
eng_sentences = read_offset_file(engoff_path)
for_sentences = read_offset_file(foroff_path)
assert len(dict_data) == len(eng_sentences)
assert len(dict_data) == len(for_sentences)
for alignments, eng_sentence, for_sentence in zip(dict_data, eng_sentences, for_sentences):
for eng_token_count, eng_id_lists in alignments:
if eng_token_count != len(eng_sentence) or \
len(eng_id_lists) != len(for_sentence) + 1:
print('WARNING: token counts don\'t match, skipping', file=sys.stderr)
else:
# Unaligned English words are aligned to a dummy token which we represent by the offsets 0,0:
print(0, 0, trgid_list2english_offsets(eng_id_lists[0], eng_sentence), sep='\t')
# Aligned English tokens are output with each foreign token:
for (for_from, for_to), eng_id_list in zip(for_sentence, eng_id_lists[1:]):
print(for_from, for_to,
trgid_list2english_offsets(eng_id_list, eng_sentence),
                          sep='\t')
print()
| [
"[email protected]"
] | |
f9d797c43df21655a86fdee684138f49cb9bed79 | af33dc088dbbd4274abf44c1356dc3a66c65ca28 | /normalize.py | c4c09e44d178aecccf7ba71c87dc260b8ee0261a | [] | no_license | rasoolims/PBreak | dce2b14165e864803544a062a4dc68b3a6edffa7 | f2f5b4bdc626f1695ccd55aed8e35c8c69e827bf | refs/heads/master | 2023-01-18T15:58:07.947365 | 2020-12-09T18:57:34 | 2020-12-09T18:57:34 | 300,011,165 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from break_words import *
if __name__ == "__main__":
parser = get_lm_option_parser()
(options, args) = parser.parse_args()
normalizer = Normalizer()
with open(options.input_path, "r") as r, open(options.output_path, "w") as w:
for i, line in enumerate(r):
sen = line.strip()
sen = normalizer.normalize(sen)
w.write(sen + "\n")
if i % 1000 == 0:
print(i, end="\r")
print("\nFinished")
| [
"[email protected]"
] | |
f457a73cbca000fc2a445c94d38d41594ff73c95 | 64c5341a41e10ea7f19582cbbf3c201d92768b9f | /webInterface/aligner_webapp/yalignve/bin/easy_install | 93fd0dc0b09c482e2de9bddd82e729cf5bbb7991 | [] | no_license | CLARIN-PL/yalign | 6b050b5c330b8eaf7e1e2f9ef83ec88a8abe5164 | 6da94fbb74e803bea337e0c171c8abff3b17d7ee | refs/heads/master | 2023-06-10T18:30:42.112215 | 2021-06-24T13:07:17 | 2021-06-24T13:07:17 | 51,368,327 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | #!/home/nlp/Downloads/Aligner/aligner_webapp/yalignve/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a773e41ef132000b96c67ea4fe54eb8f7125c6ab | 93c22f53bc7ce33a4384c53f02988e6c9ccd86c9 | /re_flags_ascii.py | 392bb3d7d26fc66b2dea16eb68f1e9ebb5721655 | [
"Apache-2.0"
] | permissive | Kalpavrikshika/python_modules | a5ce678b58f94d32274846811388d991c281d4d3 | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | refs/heads/master | 2020-03-08T02:23:08.650248 | 2018-06-28T07:38:01 | 2018-06-28T07:38:01 | 127,858,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | import re
text = u'Français złoty Österreich'
pattern = r'\w+'
#Give Ascii flag to compile in ascii.
ascii_pattern = re.compile(pattern, re.ASCII)
#Defined in unicode by default.
unicode_pattern = re.compile(pattern)
print('Text    :', text)
print('Pattern :', pattern)
print('ASCII   :', list(ascii_pattern.findall(text)))
print('Unicode :', list(unicode_pattern.findall(text)))
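# Expected: the ASCII pattern splits words at the non-ASCII letters
# (['Fran', 'ais', 'z', 'oty', 'sterreich']), while the Unicode default
# keeps them whole (['Français', 'złoty', 'Österreich']).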
"[email protected]"
] | |
446b1954ef0713f46373376108286f757197384f | bade79e88dd32b42b4841a1e1a94eddfc86652bf | /Agreement/modus/skio/protocol/pms.py | a2649f7c82895dd6c1173cf5fd9b2ba7fdb2a83c | [] | no_license | a452669850/DCSNEW | 927171b10f455b396b50e8400001efcdb5fd1217 | 7578b6b18e021108c3e8b31c5dab2a17ac16a79d | refs/heads/master | 2023-01-04T11:25:09.787625 | 2020-10-20T13:16:54 | 2020-10-20T13:16:54 | 305,712,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,498 | py | import socket
import struct
import typing
from modbus_tk import defines as cst
from modbus_tk.exceptions import ModbusError
from modbus_tk.modbus_tcp import TcpMaster
from pubsub import pub
from Agreement.modus.skio import exception
from Agreement.modus.skio.define import IDev, IVar, T_Val, ValType
_T_OUTPUT_VALUE = typing.Union[typing.Tuple[int], typing.List[int], int]
# 三门协议
class SmPXIDev(IDev, TcpMaster):
_slave: int = 1
def __init__(self):
IDev.__init__(self)
TcpMaster.__init__(self)
def setup(self, uri):
host, port = uri.split(':')
self._host = host
self._port = int(port)
pub.sendMessage('SKIO.SmPXIDev.SETUP', a1=uri)
def read(self, var: IVar) -> T_Val:
try:
f_code = int(str(var.reg)[0])
address = int(str(var.reg)[1:])
except TypeError:
raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')
if var.chr == ValType.D64:
length = 4
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', a1=struct.unpack('<d', struct.pack('<HHHH', *data))[0])
return struct.unpack('<d', struct.pack('<HHHH', *data))[0]
elif var.chr == ValType.U32:
length = 2
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
            pub.sendMessage('SKIO.SmPXIDev.READ', a1=struct.unpack('<L', struct.pack('<HH', *data))[0])
            return struct.unpack('<L', struct.pack('<HH', *data))[0]
elif var.chr == ValType.BOOL:
length = 1
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', a1=data[0])
return data[0]
elif var.chr == ValType.U64:
length = 4
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', a1=struct.unpack(f'<Q', struct.pack('<HHHH', *data))[0])
return struct.unpack(f'<Q', struct.pack(f'<HHHH', *data))[0]
else:
raise exception.SkError(exception.UNSUPPORTED_TYPE,
f'{self.__class__.__name__} not support {var.chr}')
def write(self, var: IVar, value: T_Val):
try:
if var.chr == ValType.B1:
value = int(value)
value = 1 if value > 0 else 0
elif var.chr in (ValType.F32, ValType.D64):
value = float(value)
else:
value = float(value)
f_code = int(str(var.reg)[0])
address = int(str(var.reg)[1:])
if var.engineering_unit == 'amps':
value = float(value)
value = (value - var.rlo) / (var.rhi - var.rlo)
elif var.engineering_unit == 'volts':
value = float(value)
value = (value - var.rlo) / (var.rhi - var.rlo)
elif var.engineering_unit == 'OHMS':
                value = OHMSanalysis(value, var)
elif var.engineering_unit == 'HZ':
value = float(value)
except (TypeError, ValueError):
raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')
if f_code == cst.COILS:
value = 1 if int(value) > 0 else 0
self._cmd(f_code=cst.WRITE_SINGLE_COIL, address=address, output_value=value)
pub.sendMessage('SKIO.SmPXIDev.WRITE', a1=var.name, a2=value)
return value
elif f_code == cst.HOLDING_REGISTERS:
if var.chr == ValType.D64:
output_value = struct.unpack('<HHHH', struct.pack('<d', value))
elif var.chr == ValType.U64:
output_value = struct.unpack('<HHHH', struct.pack('<Q', value))
elif var.chr == ValType.U32:
output_value = struct.unpack('<HH', struct.pack('<L', value))
else:
raise exception.SkError(exception.UNSUPPORTED_TYPE,
f'{self.__class__.__name__} not support {var.chr}')
self._cmd(f_code=cst.WRITE_MULTIPLE_REGISTERS, address=address, output_value=output_value)
pub.sendMessage('SKIO.SmPXIDev.WRITE', a1=var.name, a2=value)
return value
else:
raise exception.SkError(exception.READ_ONLY, f'{var.name} is ReadOnly')
def _cmd(self,
f_code: int,
address: int,
quantity_of_x: int = 0,
output_value: _T_OUTPUT_VALUE = 0,
expected_length: int = -1
) -> _T_OUTPUT_VALUE:
try:
return self.execute(
self._slave,
f_code,
address,
quantity_of_x=quantity_of_x,
output_value=output_value,
expected_length=expected_length
)
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, OSError) as e:
raise exception.SkError(exception.NETWORK_ERROR, e)
except ModbusError as e:
raise exception.SkError(exception.PROTOCOL_ERROR, e.get_exception_code())
class ModBus(IDev, TcpMaster):
_slave: int = 1
def __init__(self):
IDev.__init__(self)
TcpMaster.__init__(self)
def setup(self, uri):
host, port = uri.split(':')
self._host = host
self._port = int(port)
self.master = TcpMaster(self._host, self._port)
# pub.sendMessage('SKIO.SmPXIDev.SETUP', 传入值=uri, 传出值='写入成功')
def read(self, var: IVar) -> T_Val:
try:
f_code = int(str(var.reg)[0])
address = int(str(var.reg)[1:])
except TypeError:
raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')
if var.chr == ValType.D64:
length = 4
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', 传入值=var, 传出值=struct.unpack(f'<d', struct.pack('<HHHH', *data))[0])
return struct.unpack('<d', struct.pack('<HHHH', *data))[0]
elif var.chr == ValType.U32:
length = 2
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
            pub.sendMessage('SKIO.SmPXIDev.READ', 传入值=var, 传出值=struct.unpack('<L', struct.pack('<HH', *data))[0])
            return struct.unpack('<L', struct.pack('<HH', *data))[0]
elif var.chr == ValType.BOOL:
length = 1
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', 传入值=var, 传出值=data[0])
return data[0]
elif var.chr == ValType.U64:
length = 4
data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)
pub.sendMessage('SKIO.SmPXIDev.READ', 传入值=var, 传出值=struct.unpack(f'<Q', struct.pack('<HHHH', *data))[0])
return struct.unpack(f'<Q', struct.pack(f'<HHHH', *data))[0]
else:
raise exception.SkError(exception.UNSUPPORTED_TYPE,
f'{self.__class__.__name__} not support {var.chr}')
def write(self, var: IVar, value: T_Val):
try:
if var.chr == ValType.B1:
value = int(value)
value = 1 if value > 0 else 0
elif var.chr in (ValType.F32, ValType.D64):
value = float(value)
else:
value = float(value)
f_code = int(str(var.reg)[0])
address = int(str(var.reg)[1:])
if var.engineering_unit == 'amps':
value = float(value)
value = (value - var.rlo) / (var.rhi - var.rlo)
elif var.engineering_unit == 'volts':
value = float(value)
value = (value - var.rlo) / (var.rhi - var.rlo)
elif var.engineering_unit == 'OHMS':
                value = OHMSanalysis(value, var)
elif var.engineering_unit == 'HZ':
value = float(value)
except (TypeError, ValueError):
raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')
if f_code == cst.COILS:
value = 1 if int(value) > 0 else 0
self._cmd(f_code=cst.WRITE_SINGLE_COIL, address=address, output_value=value)
pub.sendMessage('SKIO.SmPXIDev.WRITE', 传入值1=var, 传入值2=value, 传出值=value)
return value
elif f_code == cst.HOLDING_REGISTERS:
if var.chr == ValType.D64:
output_value = struct.unpack('<HHHH', struct.pack('<d', value))
elif var.chr == ValType.U64:
output_value = struct.unpack('<HHHH', struct.pack('<Q', value))
elif var.chr == ValType.U32:
output_value = struct.unpack('<HH', struct.pack('<L', value))
else:
raise exception.SkError(exception.UNSUPPORTED_TYPE,
f'{self.__class__.__name__} not support {var.chr}')
self._cmd(f_code=cst.WRITE_MULTIPLE_REGISTERS, address=address, output_value=output_value)
pub.sendMessage('SKIO.SmPXIDev.WRITE', 传入值=var, 传出值=value)
return value
else:
raise exception.SkError(exception.READ_ONLY, f'{var.name} is ReadOnly')
def _cmd(self,
f_code: int,
address: int,
quantity_of_x: int = 0,
output_value: _T_OUTPUT_VALUE = 0,
expected_length: int = -1
) -> _T_OUTPUT_VALUE:
try:
return self.master.execute(
self._slave,
f_code,
address,
quantity_of_x=quantity_of_x,
output_value=output_value,
expected_length=expected_length
)
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, OSError) as e:
raise exception.SkError(exception.NETWORK_ERROR, e)
except ModbusError as e:
raise exception.SkError(exception.PROTOCOL_ERROR, e.get_exception_code())
def OHMSanalysis(value, var):
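    # Scales a resistance value into the normalized 0..1 engineering span.
    # Assumption: 0.000476 is the device's ohms-to-signal conversion factor;
    # the constant is not documented anywhere in this file.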
value = float(value) * 0.000476
value = (value - var.elo) / (var.ehi - var.elo)
return value
| [
"[email protected]"
] | |
217ccb92296cf39042a30cba2c587c4da9ac194d | 6e402eabc041dfef73a41a987b53eea6b566fb0c | /best/buses/handlers/bus.py | b35a6dcae3ad41131a502f9e77f978a3afe77103 | [] | no_license | batpad/bombayography | fb125a168ccdb217aff3672074001edb9866f2e8 | 0c6a64d32f826f8b9e43a695a327d64fcb4f58cf | refs/heads/master | 2021-01-25T05:15:51.161024 | 2011-01-02T10:30:23 | 2011-01-02T10:30:23 | 1,213,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsms.contrib.handlers.handlers.keyword import KeywordHandler
from buses.models import *
class BestHandler(KeywordHandler):
keyword = "route"
def help(self):
self.respond("Send route <bus_no>")
def handle(self, text):
bus_no = text.strip()
a = Atlas.objects.filter(route__iexact=bus_no)
if len(a) < 1:
self.respond("Did not find that bus number. Sorry.")
else:
a = a[0]
src = a.src
first_src = a.first_src
last_src = a.last_src
dest = a.dest
first_dest = a.first_dest
last_dest = a.last_dest
schedule = a.schedule
ret = "%s(%s-%s) to %s(%s-%s) from %s" % (src, str(first_src), str(last_src), dest, str(first_dest), str(last_dest), schedule)
self.respond(ret)
| [
"[email protected]"
] | |
1574bf91fa053aef47cf8548c54380b76988e91f | 80861e99492590d314dde6f3f19103c6d36fd02f | /ucsmsdk/methodmeta/EquipmentInstantiateNNamedTemplateMeta.py | 526b9387bec99f130ce1272c4922def3c95d0f68 | [
"Apache-2.0"
] | permissive | CiscoUcs/ucsmsdk | 2abf67cc084b0f23e453ae3192669a56018aa784 | d0f0fe2bfc7507e3189408e0113e204bd0d69386 | refs/heads/master | 2023-08-31T04:07:22.546644 | 2023-08-30T06:44:19 | 2023-08-30T06:44:19 | 46,483,999 | 83 | 94 | NOASSERTION | 2023-08-30T06:44:20 | 2015-11-19T10:06:22 | Python | UTF-8 | Python | false | false | 1,336 | py | """This module contains the meta information of EquipmentInstantiateNNamedTemplate ExternalMethod."""
from ..ucscoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("EquipmentInstantiateNNamedTemplate", "equipmentInstantiateNNamedTemplate", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"dn": MethodPropertyMeta("Dn", "dn", "ReferenceObject", "Version142b", "InputOutput", False),
"in_error_on_existing": MethodPropertyMeta("InErrorOnExisting", "inErrorOnExisting", "Xs:string", "Version142b", "Input", False),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"in_name_set": MethodPropertyMeta("InNameSet", "inNameSet", "DnSet", "Version142b", "Input", True),
"in_target_org": MethodPropertyMeta("InTargetOrg", "inTargetOrg", "ReferenceObject", "Version142b", "Input", False),
"out_configs": MethodPropertyMeta("OutConfigs", "outConfigs", "ConfigSet", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"dn": "dn",
"inErrorOnExisting": "in_error_on_existing",
"inHierarchical": "in_hierarchical",
"inNameSet": "in_name_set",
"inTargetOrg": "in_target_org",
"outConfigs": "out_configs",
}
| [
"[email protected]"
] | |
be7c6ed98d927a0bf90132514d10d3e80d087d74 | 6f06e33ee01027b8429fdf8563fae88b65e604e4 | /Lab04_03_QueuRunners.py | c45d63f3a357c54085ef0827a8092782e452a2b9 | [] | no_license | wjcheon/DeeplearningPractice_MODU | 0f8bf29f59087dffca92d3cb82eebdab545ee811 | 40e0115c71ab03fc2b038718516780a86feb0bfd | refs/heads/master | 2021-01-01T06:52:01.075539 | 2017-08-24T07:15:55 | 2017-08-24T07:15:55 | 97,533,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 13:58:48 2017
@author: Wonjoong Cheon
"""
#%%
import tensorflow as tf
import numpy as np
filename_queue = tf.train.string_input_producer(['data-01-test-score.csv','data-01-test-score.csv'])
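# NOTE: the queue above is set up for the QueueRunner demo but is never
# consumed; the data is actually loaded eagerly with NumPy below.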
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype = np.float32)
x_data = xy[:,0:-1]
y_data = xy[:,[-1]]
print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data, len(y_data))
#
#
X = tf.placeholder(tf.float32,shape = [None, 3])
Y = tf.placeholder(tf.float32,shape = [None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name = 'Weight')
b = tf.Variable(tf.random_normal([1]), name = 'bias')
#
hypothesis = tf.matmul(X,W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
#
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)
train = optimizer.minimize(cost)
#%%
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
feed_dict = {X:x_data, Y:y_data})
if step % 20 == 0:
print(step, "Cost:", cost_val, "\nPrediction:\n", hy_val)
#%%
print("Your score will be", sess.run(hypothesis, feed_dict={X:[[100, 70, 101]]}))
print("Other socre will be", sess.run(hypothesis, feed_dict={X:[[60, 70, 110], [90, 100, 80]]})) | [
"[email protected]"
] | |
a4e969cf1aba0818fcce2f0589c09f8557df1e0b | 1867c4c3f402424863f0dce931e4d4553d04bb0a | /office/migrations/0007_auto_20210110_2013.py | 7e6ff6eea0045a4fcfe44fe6ebca6e0a16256754 | [] | no_license | AnthonyRedGrave/innowise-task-api | 67f02792cf1b8fe30e0469a85375d9a45cbf858b | 819593d0873c40bd20925a9cd503548bb1544295 | refs/heads/master | 2023-02-20T13:51:35.054540 | 2021-01-27T18:37:57 | 2021-01-27T18:37:57 | 327,998,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # Generated by Django 3.1.5 on 2021-01-10 17:13
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('office', '0006_auto_20210110_2006'),
]
operations = [
migrations.AlterField(
model_name='place',
name='client',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='place',
name='data',
field=models.DateField(blank=True, default=datetime.date(2021, 1, 10), null=True, verbose_name='Дата для бронирования'),
),
]
| [
"[email protected]"
] | |
a5a46356428a18fbccc661b250af675d7b5334c5 | 8dde6f201657946ad0cfeacab41831f681e6bc6f | /62. Unique Paths.py | 59341fd6c2040b99346b7d2dce4546f675fd0360 | [] | no_license | peraktong/LEETCODE_Jason | c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c | 06961cc468211b9692cd7a889ee38d1cd4e1d11e | refs/heads/master | 2022-04-12T11:34:38.738731 | 2020-04-07T21:17:04 | 2020-04-07T21:17:04 | 219,398,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# DP
dp = [[1] * n for i in range(m)]
for i in range(1, m):
for j in range(1, n):
dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
return dp[-1][-1]
| [
"[email protected]"
] | |
ac0e03c9aabb196bcd25e697acd16276ed0dfb48 | 4944541b0cd0fa48a01581ffce5e7ce16f5cf8d7 | /src/Backend/MbkExam/SimpleExam/views.py | 041952f4adbc8875326e27a1df3d2cbda002813f | [] | no_license | aballah-chamakh/the_exam | 49a5b5c9d28c61b2283f2d42d2b2fb771dd48bf4 | dbbbdc7a955ca61572f26430a7788407eaf0c632 | refs/heads/main | 2023-03-28T13:19:18.148630 | 2021-04-03T22:12:51 | 2021-04-03T22:12:51 | 354,404,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.shortcuts import render
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets,status,generics
from .models import SimpleExam
from .serializers import SimpleExamSerializer
class SimpleExamViewSet(viewsets.ModelViewSet):
serializer_class = SimpleExamSerializer
queryset = SimpleExam.objects.all()
| [
"[email protected]"
] | |
0284d92e23809cd7d04fd9c59c3266ec025e9d92 | 3dc9f2aaea2620d6c382211369548d9b52f4039a | /FaceQuality/qualityface/config.py | f0753116b5bb74da50ad2f23270be1c25b0be2cd | [
"MIT"
] | permissive | banana1024/FaceProjects | 6fd4961d73356139c7ebba85428a9c40b02335a8 | 87ae30191c01eadc3cfa59b751db91f1aa76bc5d | refs/heads/master | 2022-09-18T15:40:19.346332 | 2020-05-29T08:15:17 | 2020-05-29T08:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | import torch
import torchvision.transforms as T
class Config:
# data preprocess
test_transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.5] * 3, std=[0.5] * 3),
])
# training settings
checkpoints = "checkpoints"
restore_model = "last.pth"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = Config() | [
"[email protected]"
] | |
1cc98c58b0790dbe9fcc7b0cafa6d97fb43db76b | c7a39ecf433a6b954096aed2ef8ef2b3366fab35 | /quora_project/questions/api/views.py | 686c01b36dd75ef3c615aec616367168208b3896 | [
"MIT"
] | permissive | vojta-janousek/Questions-and-Answers-Website | 78f28a8f8ad6e66f7f6aca6db733e41eef3c1485 | 02cfb6274a73f64382e57f3718beb5ee89c2d1ac | refs/heads/master | 2020-11-24T16:09:10.519718 | 2020-04-01T20:58:17 | 2020-04-01T20:58:17 | 228,234,952 | 0 | 0 | MIT | 2020-06-07T17:19:59 | 2019-12-15T18:58:25 | Python | UTF-8 | Python | false | false | 2,803 | py | from rest_framework import viewsets, generics, status
from rest_framework.exceptions import ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from questions.api.permissions import IsAuthorOrReadOnly
from questions.api.serializers import QuestionSerializer, AnswerSerializer
from questions.models import Question, Answer
class QuestionViewSet(viewsets.ModelViewSet):
queryset = Question.objects.all()
lookup_field = 'slug'
serializer_class = QuestionSerializer
permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]
def perform_create(self, serializer):
serializer.save(author=self.request.user)
class AnswerCreateAPIView(generics.CreateAPIView):
queryset = Answer.objects.all()
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated]
def perform_create(self, serializer):
request_user = self.request.user
kwarg_slug = self.kwargs.get('slug')
question = get_object_or_404(Question, slug=kwarg_slug)
if question.answers.filter(author=request_user).exists():
raise ValidationError('You have already answered this Question')
serializer.save(author=request_user, question=question)
class QuestionAnswerListAPIView(generics.ListAPIView):
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
kwarg_slug = self.kwargs.get('slug')
return Answer.objects.filter(
question__slug=kwarg_slug
).order_by('-created_at')
class AnswerRUDAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = Answer.objects.all()
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated, IsAuthorOrReadOnly]
class AnswerLikeAPIView(APIView):
serializer_class = AnswerSerializer
permission_classes = [IsAuthenticated]
def delete(self, request, pk):
answer = get_object_or_404(Answer, pk=pk)
user = request.user
answer.voters.remove(user)
answer.save()
serializer_context = {'request': request}
serializer = self.serializer_class(answer, context=serializer_context)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, pk):
answer = get_object_or_404(Answer, pk=pk)
user = request.user
answer.voters.add(user)
answer.save()
serializer_context = {'request': request}
serializer = self.serializer_class(answer, context=serializer_context)
return Response(serializer.data, status=status.HTTP_200_OK)
| [
"[email protected]"
] | |
682ad7458ee8c3a3b669d39d903e656bb7072887 | 651a296c8f45b5799781fd78a6b5329effe702a0 | /monomial/mono_upto_enum.py | 3a359439d6875442b34669e9b22902025af36554 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,963 | py | #!/usr/bin/env python
def mono_upto_enum ( m, n ):
#*****************************************************************************80
#
## MONO_UPTO_ENUM enumerates monomials in M dimensions of degree up to N.
#
# Discussion:
#
# For M = 2, we have the following values:
#
# N VALUE
#
# 0 1
# 1 3
# 2 6
# 3 10
# 4 15
# 5 21
#
# In particular, VALUE(2,3) = 10 because we have the 10 monomials:
#
# 1, x, y, x^2, xy, y^2, x^3, x^2y, xy^2, y^3.
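#    In general, VALUE(M,N) = C(N+M,N), the binomial "stars and bars" count.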
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 October 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer M, the spatial dimension.
#
# Input, integer N, the maximum degree.
#
# Output, integer VALUE, the number of monomials in
# M variables, of total degree N or less.
#
from i4_choose import i4_choose
value = i4_choose ( n + m, n )
return value
def mono_upto_enum_test ( ):
#*****************************************************************************80
#
## MONO_UPTO_ENUM_TEST tests MONO_UPTO_ENUM.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 23 October 2014
#
# Author:
#
# John Burkardt
#
print ''
print 'MONO_UPTO_ENUM_TEST'
print ' MONO_UPTO_ENUM can enumerate the number of monomials'
print ' in M variables, of total degree between 0 and N.'
print '';
print ' N:',
for n in range ( 0, 9 ):
print ' %4d' % ( n ),
print ''
print ' M +---------------------------------------------------------------'
for m in range ( 1, 9 ):
print ' %2d |' % ( m ),
for n in range ( 0, 9 ):
v = mono_upto_enum ( m, n )
print ' %5d' % ( v ),
print ''
print ''
print 'MONO_UPTO_ENUM_TEST'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
mono_upto_enum_test ( )
timestamp ( )
| [
"[email protected]"
] | |
9957e7a14d7db921fad9a4cdccf9aeaacb5ce2e9 | 559f3dec0964d2e0f86c6c871371fe779cf3726c | /Matting/tools/predict.py | 83cff09e4a9dc949138a30d8d2bcd28e38cacff4 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleSeg | 319ab26665ea492527a1949671650135123ffc39 | 2c8c35a8949fef74599f5ec557d340a14415f20d | refs/heads/release/2.8 | 2023-08-31T09:08:06.724717 | 2023-08-18T01:59:56 | 2023-08-18T01:59:56 | 204,380,779 | 8,531 | 1,866 | Apache-2.0 | 2023-09-12T02:30:42 | 2019-08-26T02:32:22 | Python | UTF-8 | Python | false | false | 3,468 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import paddle
import paddleseg
from paddleseg.cvlibs import manager
from paddleseg.utils import get_sys_env, logger
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(LOCAL_PATH, '..'))
manager.BACKBONES._components_dict.clear()
manager.TRANSFORMS._components_dict.clear()
import ppmatting
from ppmatting.core import predict
from ppmatting.utils import get_image_list, Config, MatBuilder
def parse_args():
parser = argparse.ArgumentParser(description='Model training')
parser.add_argument(
"--config", dest="cfg", help="The config file.", default=None, type=str)
parser.add_argument(
'--model_path',
dest='model_path',
help='The path of model for prediction',
type=str,
default=None)
parser.add_argument(
'--image_path',
dest='image_path',
help='The path of image, it can be a file or a directory including images',
type=str,
default=None)
parser.add_argument(
'--trimap_path',
dest='trimap_path',
help='The path of trimap, it can be a file or a directory including images. '
'The image should be the same as image when it is a directory.',
type=str,
default=None)
parser.add_argument(
'--save_dir',
dest='save_dir',
help='The directory for saving the model snapshot',
type=str,
default='./output/results')
parser.add_argument(
'--fg_estimate',
default=True,
type=eval,
choices=[True, False],
help='Whether to estimate foreground when predicting.')
parser.add_argument(
'--device',
dest='device',
help='Set the device type, which may be GPU, CPU or XPU.',
default='gpu',
type=str)
return parser.parse_args()
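# Typical invocation (all paths below are placeholders):
#   python tools/predict.py --config configs/matting.yml \
#       --model_path output/best_model/model.pdparams \
#       --image_path demo/image.jpg --save_dir output/results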
def main(args):
assert args.cfg is not None, \
'No configuration file specified, please set --config'
cfg = Config(args.cfg)
builder = MatBuilder(cfg)
paddleseg.utils.show_env_info()
paddleseg.utils.show_cfg_info(cfg)
paddleseg.utils.set_device(args.device)
model = builder.model
transforms = ppmatting.transforms.Compose(builder.val_transforms)
image_list, image_dir = get_image_list(args.image_path)
if args.trimap_path is None:
trimap_list = None
else:
trimap_list, _ = get_image_list(args.trimap_path)
logger.info('Number of predict images = {}'.format(len(image_list)))
predict(
model,
model_path=args.model_path,
transforms=transforms,
image_list=image_list,
image_dir=image_dir,
trimap_list=trimap_list,
save_dir=args.save_dir,
fg_estimate=args.fg_estimate)
if __name__ == '__main__':
args = parse_args()
main(args)
| [
"[email protected]"
] | |
fa28d630bdb2be7b15926769daaf0cf6361c335e | 8bf8ab29cb25de00c6a799d1f58610528b810592 | /모의 SW 역량테스트/5648. [모의 SW 역량테스트] 원자 소멸 시뮬레이션/참고.py | 29928d51765559d7b11a787d3a4eb2fcd27baddb | [] | no_license | mgh3326/sw_expert_academy_algorithm | fa93fb68862cabeba8f9f5fff00a87f26a014afc | 97cbd2a1845e42f142d189e9121c3cd5822fc8d8 | refs/heads/master | 2020-07-03T21:40:29.948233 | 2019-11-23T07:26:15 | 2019-11-23T07:26:15 | 202,058,567 | 0 | 0 | null | 2019-11-30T06:11:34 | 2019-08-13T03:40:18 | Python | UTF-8 | Python | false | false | 2,743 | py | T = int(input())
for tc in range(1, T + 1):
N = int(input())
atoms = [list(map(int, input().split())) for _ in range(N)]
for atom in atoms:
atom[0] *= 2
atom[1] *= 2
energy = 0
candidates = [[0, 0, 0]]
for i in range(N - 1):
for j in range(i, N):
dx = atoms[i][0] - atoms[j][0]
dy = atoms[i][1] - atoms[j][1]
v1 = atoms[i][2]
v2 = atoms[j][2]
if dy == 0:
if v1 == 2 and v2 == 3 and dx > 0:
candidates.append([dx // 2, i, j])
elif v1 == 3 and v2 == 2 and dx < 0:
candidates.append([-dx // 2, i, j])
elif dx == 0:
if v1 == 0 and v2 == 1 and dy < 0:
candidates.append([-dy // 2, i, j])
elif v1 == 1 and v2 == 0 and dy > 0:
candidates.append([dy // 2, i, j])
elif dx == dy:
if dx < 0 and v1 == 3 and v2 == 1:
candidates.append([-dx, i, j])
elif dx < 0 and v1 == 0 and v2 == 2:
candidates.append([-dx, i, j])
elif dx > 0 and v1 == 1 and v2 == 3:
candidates.append([dx, i, j])
elif dx > 0 and v1 == 2 and v2 == 0:
candidates.append([dx, i, j])
elif dx == -dy:
if dx < 0 and v1 == 3 and v2 == 0:
candidates.append([-dx, i, j])
elif dx < 0 and v1 == 1 and v2 == 2:
candidates.append([-dx, i, j])
elif dx > 0 and v1 == 0 and v2 == 3:
candidates.append([dx, i, j])
elif dx > 0 and v1 == 2 and v2 == 1:
candidates.append([dx, i, j])
visited = [0] * N
candidates.sort()
collision = []
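    # sweep the sorted candidates in groups of equal collision time: a group is
    # flushed (atoms marked visited, energy summed) before later pairs are
    # considered, so an atom destroyed earlier cannot join a later collision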
for i in range(len(candidates) - 1):
if candidates[i][0] != candidates[i + 1][0]:
while collision:
temp = collision.pop()
if not visited[temp]:
visited[temp] = 1
energy += atoms[temp][3]
if not visited[candidates[i + 1][1]] and not visited[candidates[i + 1][2]]:
collision.append(candidates[i + 1][1])
collision.append(candidates[i + 1][2])
else:
if not visited[candidates[i + 1][1]] and not visited[candidates[i + 1][2]]:
collision.append(candidates[i + 1][1])
collision.append(candidates[i + 1][2])
while collision:
temp = collision.pop()
if not visited[temp]:
visited[temp] = 1
energy += atoms[temp][3]
print('#{} {}'.format(tc, energy))
| [
"[email protected]"
] | |
b28477b4e29bb41a9e9b42c78ded882797d1b48b | 7ccdcd8e7885603141c55f4c065373417f4aa118 | /list_ele.py | db3665532a1696e4d0b1c27fe0e4ee5bad465db8 | [] | no_license | Icode4passion/practicepythonprogams | f8330c499c660d4e46ab54d7ed44d62625f250db | 8936166d6419df5deef290a5723a30661ea064a2 | refs/heads/master | 2020-03-19T10:02:13.992465 | 2018-06-06T14:26:27 | 2018-06-06T14:26:27 | 136,337,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #Write a Python program to display the first and last colors from the following list.
color_list = ['red','green','white','black']
print(color_list[0], color_list[-1])
"[email protected]"
] | |
d95ae65b44a40731a4a2e3009dc55d90af9ea531 | 58ffe83eb9828668c13242c6f98238f08655f561 | /app/notebooks/problang/custom_mlp.py | 4ac8b927457fd104f247d84542cc19d8682ce335 | [
"Apache-2.0"
] | permissive | DanFu09/esper | f9dcc47cd5677dee8dffb1e066d69332471a0d6c | ccc5547de3637728b8aaab059b6781baebc269ec | refs/heads/master | 2020-04-04T21:31:43.549572 | 2020-01-16T01:14:13 | 2020-01-16T01:14:13 | 156,289,533 | 4 | 0 | Apache-2.0 | 2018-12-14T03:01:02 | 2018-11-05T22:05:07 | Jupyter Notebook | UTF-8 | Python | false | false | 6,720 | py | from inspect import isclass
import torch
import torch.nn as nn
from pyro.distributions.util import broadcast_shape
class Exp(nn.Module):
"""
a custom module for exponentiation of tensors
"""
def __init__(self):
super(Exp, self).__init__()
def forward(self, val):
return torch.exp(val)
class ConcatModule(nn.Module):
"""
a custom module for concatenation of tensors
"""
def __init__(self, allow_broadcast=False):
self.allow_broadcast = allow_broadcast
super(ConcatModule, self).__init__()
def forward(self, *input_args):
# we have a single object
if len(input_args) == 1:
# regardless of type,
# we don't care about single objects
# we just index into the object
input_args = input_args[0]
# don't concat things that are just single objects
if torch.is_tensor(input_args):
return input_args
else:
if self.allow_broadcast:
shape = broadcast_shape(*[s.shape[:-1] for s in input_args]) + (-1,)
input_args = [s.expand(shape) for s in input_args]
return torch.cat(input_args, dim=-1)
class ListOutModule(nn.ModuleList):
"""
a custom module for outputting a list of tensors from a list of nn modules
"""
def __init__(self, modules):
super(ListOutModule, self).__init__(modules)
def forward(self, *args, **kwargs):
# loop over modules in self, apply same args
return [mm.forward(*args, **kwargs) for mm in self]
def call_nn_op(op):
"""
a helper function that adds appropriate parameters when calling
an nn module representing an operation like Softmax
:param op: the nn.Module operation to instantiate
:return: instantiation of the op module with appropriate parameters
"""
if op in [nn.Softmax, nn.LogSoftmax]:
return op(dim=1)
else:
return op()
class MLP(nn.Module):
def __init__(self, mlp_sizes, activation=nn.ReLU, output_activation=None,
post_layer_fct=lambda layer_ix, total_layers, layer: None,
post_act_fct=lambda layer_ix, total_layers, layer: None,
allow_broadcast=False, use_cuda=False):
# init the module object
super(MLP, self).__init__()
assert len(mlp_sizes) >= 2, "Must have input and output layer sizes defined"
# get our inputs, outputs, and hidden
input_size, hidden_sizes, output_size = mlp_sizes[0], mlp_sizes[1:-1], mlp_sizes[-1]
# assume int or list
assert isinstance(input_size, (int, list, tuple)), "input_size must be int, list, tuple"
# everything in MLP will be concatted if it's multiple arguments
last_layer_size = input_size if type(input_size) == int else sum(input_size)
# everything sent in will be concatted together by default
all_modules = [ConcatModule(allow_broadcast)]
        # loop over the hidden layer sizes
for layer_ix, layer_size in enumerate(hidden_sizes):
assert type(layer_size) == int, "Hidden layer sizes must be ints"
# get our nn layer module (in this case nn.Linear by default)
cur_linear_layer = nn.Linear(last_layer_size, layer_size)
# for numerical stability -- initialize the layer properly
cur_linear_layer.weight.data.normal_(0, 0.001)
cur_linear_layer.bias.data.normal_(0, 0.001)
# use GPUs to share data during training (if available)
if use_cuda:
cur_linear_layer = nn.DataParallel(cur_linear_layer)
# add our linear layer
all_modules.append(cur_linear_layer)
# handle post_linear
post_linear = post_layer_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])
# if we send something back, add it to sequential
# here we could return a batch norm for example
if post_linear is not None:
all_modules.append(post_linear)
# handle activation (assumed no params -- deal with that later)
all_modules.append(activation())
# now handle after activation
post_activation = post_act_fct(layer_ix + 1, len(hidden_sizes), all_modules[-1])
# handle post_activation if not null
# could add batch norm for example
if post_activation is not None:
all_modules.append(post_activation)
# save the layer size we just created
last_layer_size = layer_size
# now we have all of our hidden layers
# we handle outputs
assert isinstance(output_size, (int, list, tuple)), "output_size must be int, list, tuple"
if type(output_size) == int:
all_modules.append(nn.Linear(last_layer_size, output_size))
if output_activation is not None:
all_modules.append(call_nn_op(output_activation)
if isclass(output_activation) else output_activation)
else:
# we're going to have a bunch of separate layers we can spit out (a tuple of outputs)
out_layers = []
# multiple outputs? handle separately
for out_ix, out_size in enumerate(output_size):
# for a single output object, we create a linear layer and some weights
split_layer = []
                # start with the linear projection for this output head
split_layer.append(nn.Linear(last_layer_size, out_size))
# then we get our output activation (either we repeat all or we index into a same sized array)
act_out_fct = output_activation if not isinstance(output_activation, (list, tuple)) \
else output_activation[out_ix]
                if act_out_fct:
                    # we check if it's a class. if so, instantiate the object
                    # otherwise, use the object directly (e.g. pre-instantiated)
split_layer.append(call_nn_op(act_out_fct)
if isclass(act_out_fct) else act_out_fct)
# our outputs is just a sequential of the two
out_layers.append(nn.Sequential(*split_layer))
all_modules.append(ListOutModule(out_layers))
# now we have all of our modules, we're ready to build our sequential!
# process mlps in order, pretty standard here
self.sequential_mlp = nn.Sequential(*all_modules)
# pass through our sequential for the output!
def forward(self, *args, **kwargs):
return self.sequential_mlp.forward(*args, **kwargs)
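if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module; the
    # layer sizes and batch shape are assumptions chosen for demonstration,
    # and running it requires torch plus the pyro dependency imported above):
    # build a 784-input MLP with one hidden layer and a softmax output head.
    mlp = MLP([784, 256, 10], output_activation=nn.Softmax)
    probs = mlp(torch.randn(32, 784))
    print(probs.shape)  # expected: torch.Size([32, 10])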
| [
"[email protected]"
] | |
61fc96cbdbdbae2bdd17f620cf967d0f0f88ef4d | b2545b8c1a7ed01216ae090a634ddf19099f175a | /python/coding-challenges/cc-005-create-phonebook/phone_book_class.py | ffe561d02e48a6472d28362db9e9b284221663c4 | [] | no_license | kopuskopecik/my-aws-devops-projects | 50f222986150ccd20d724ccaec9ec637aaf08a3c | 4a25f8f72c262c933ada26c0ac0476f4ef68fbcf | refs/heads/master | 2023-01-10T00:59:39.718578 | 2020-11-03T14:58:48 | 2020-11-03T14:58:48 | 290,531,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | class PhoneBook:
phone_book = {} # class attribute
liste = []
demet = ()
print("Sınıf niteliği")
def __init__(self):
print("init çalıştı")
self.elma = ""
self.liste2 = []
self.demet2 = ()
self.start()
def start(self):
#self.book = {}
print("""
Welcome to the phonebook application
1. Find phone number
2. Insert a phone number
3. Delete a person from the phonebook
4. Terminate
""")
        entered_number = input("Select operation on Phonebook App (1/2/3/4) :")
if entered_number == "1":
self.find_number()
self.start()
elif entered_number == "2":
self.insert_number()
self.start()
elif entered_number == "3":
self.delete_number()
self.start()
elif entered_number == "4":
print("Exiting Phonebook")
else:
print("Please enter a valid number")
self.start()
def insert_number(self):
name = input("Insert name of the person : ")
number = input("Insert phone number of the person: ")
if name and number.isdigit():
self.phone_book[name] = number
# phone_book = { "ali": 123456}
print(self.phone_book)
else:
print("Invalid input format, cancelling operation ...")
print(self.phone_book)
def find_number(self):
name = input("Find the phone number of : ")
if name:
value = self.phone_book.get(name, f"Couldn't find phone number of {name}")
print(value)
def delete_number(self):
name = input("Whom to delete from phonebook : ")
if name:
value = self.phone_book.pop(name, "")
if value:
print(f"{name} is deleted from the phonebook")
else:
print(f"{name} is not in the phonebook")
if __name__ == "__main__":
    PhoneBook()
| [
"[email protected]"
] | |
900fd54df91d24ab3780bcd44f4f40e790869f97 | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/infra/_meta/_Cisco_IOS_XR_infra_infra_locale_cfg.py | f4b0a7c537e95d67b1f589244f48ddddf454a625 | [
"Apache-2.0"
] | permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,749 | py |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum, _dm_validate_value
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYDataValidationError
from ydk.models import _yang_ns
_meta_table = {
'LocaleCountry_Enum' : _MetaInfoEnum('LocaleCountry_Enum', 'ydk.models.infra.Cisco_IOS_XR_infra_infra_locale_cfg',
{
'ad':'AD',
'ae':'AE',
'af':'AF',
'ag':'AG',
'ai':'AI',
'al':'AL',
'am':'AM',
'an':'AN',
'ao':'AO',
'aq':'AQ',
'ar':'AR',
'as':'AS',
'at':'AT',
'au':'AU',
'aw':'AW',
'az':'AZ',
'ba':'BA',
'bb':'BB',
'bd':'BD',
'be':'BE',
'bf':'BF',
'bg':'BG',
'bh':'BH',
'bi':'BI',
'bj':'BJ',
'bm':'BM',
'bn':'BN',
'bo':'BO',
'br':'BR',
'bs':'BS',
'bt':'BT',
'bv':'BV',
'bw':'BW',
'by':'BY',
'bz':'BZ',
'ca':'CA',
'cc':'CC',
'cd':'CD',
'cf':'CF',
'cg':'CG',
'ch':'CH',
'ci':'CI',
'ck':'CK',
'cl':'CL',
'cm':'CM',
'cn':'CN',
'co':'CO',
'cr':'CR',
'cu':'CU',
'cv':'CV',
'cx':'CX',
'cy':'CY',
'cz':'CZ',
'de':'DE',
'dj':'DJ',
'dk':'DK',
'dm':'DM',
'do':'DO',
'dz':'DZ',
'ec':'EC',
'ee':'EE',
'eg':'EG',
'eh':'EH',
'er':'ER',
'es':'ES',
'et':'ET',
'fi':'FI',
'fj':'FJ',
'fk':'FK',
'fm':'FM',
'fo':'FO',
'fr':'FR',
'ga':'GA',
'gb':'GB',
'gd':'GD',
'ge':'GE',
'gf':'GF',
'gh':'GH',
'gi':'GI',
'gl':'GL',
'gm':'GM',
'gn':'GN',
'gp':'GP',
'gq':'GQ',
'gr':'GR',
'gs':'GS',
'gt':'GT',
'gu':'GU',
'gw':'GW',
'gy':'GY',
'hk':'HK',
'hm':'HM',
'hn':'HN',
'hr':'HR',
'ht':'HT',
'hu':'HU',
'id':'ID',
'ie':'IE',
'il':'IL',
'in':'IN',
'io':'IO',
'iq':'IQ',
'ir':'IR',
'is':'IS',
'it':'IT',
'jm':'JM',
'jo':'JO',
'jp':'JP',
'ke':'KE',
'kg':'KG',
'kh':'KH',
'ki':'KI',
'km':'KM',
'kn':'KN',
'kp':'KP',
'kr':'KR',
'kw':'KW',
'ky':'KY',
'kz':'KZ',
'la':'LA',
'lb':'LB',
'lc':'LC',
'li':'LI',
'lk':'LK',
'lr':'LR',
'ls':'LS',
'lt':'LT',
'lu':'LU',
'lv':'LV',
'ly':'LY',
'ma':'MA',
'mc':'MC',
'md':'MD',
'mg':'MG',
'mh':'MH',
'mk':'MK',
'ml':'ML',
'mm':'MM',
'mn':'MN',
'mo':'MO',
'mp':'MP',
'mq':'MQ',
'mr':'MR',
'ms':'MS',
'mt':'MT',
'mu':'MU',
'mv':'MV',
'mw':'MW',
'mx':'MX',
'my':'MY',
'mz':'MZ',
'na':'NA',
'nc':'NC',
'ne':'NE',
'nf':'NF',
'ng':'NG',
'ni':'NI',
'nl':'NL',
'no':'NO',
'np':'NP',
'nr':'NR',
'nu':'NU',
'nz':'NZ',
'om':'OM',
'pa':'PA',
'pe':'PE',
'pf':'PF',
'pg':'PG',
'ph':'PH',
'pk':'PK',
'pl':'PL',
'pm':'PM',
'pn':'PN',
'pr':'PR',
'pt':'PT',
'pw':'PW',
'py':'PY',
'qa':'QA',
're':'RE',
'ro':'RO',
'ru':'RU',
'rw':'RW',
'sa':'SA',
'sb':'SB',
'sc':'SC',
'sd':'SD',
'se':'SE',
'sg':'SG',
'sh':'SH',
'si':'SI',
'sj':'SJ',
'sk':'SK',
'sl':'SL',
'sm':'SM',
'sn':'SN',
'so':'SO',
'sr':'SR',
'st':'ST',
'sv':'SV',
'sy':'SY',
'sz':'SZ',
'tc':'TC',
'td':'TD',
'tf':'TF',
'tg':'TG',
'th':'TH',
'tj':'TJ',
'tk':'TK',
'tm':'TM',
'tn':'TN',
'to':'TO',
'tp':'TP',
'tr':'TR',
'tt':'TT',
'tv':'TV',
'tw':'TW',
'tz':'TZ',
'ua':'UA',
'ug':'UG',
'um':'UM',
'us':'US',
'uy':'UY',
'uz':'UZ',
'va':'VA',
'vc':'VC',
've':'VE',
'vg':'VG',
'vi':'VI',
'vn':'VN',
'vu':'VU',
'wf':'WF',
'ws':'WS',
'ye':'YE',
'yt':'YT',
'yu':'YU',
'za':'ZA',
'zm':'ZM',
'zw':'ZW',
}, 'Cisco-IOS-XR-infra-infra-locale-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg']),
'LocaleLanguage_Enum' : _MetaInfoEnum('LocaleLanguage_Enum', 'ydk.models.infra.Cisco_IOS_XR_infra_infra_locale_cfg',
{
'aa':'AA',
'ab':'AB',
'af':'AF',
'am':'AM',
'ar':'AR',
'as':'AS',
'ay':'AY',
'az':'AZ',
'ba':'BA',
'be':'BE',
'bg':'BG',
'bh':'BH',
'bi':'BI',
'bn':'BN',
'bo':'BO',
'br':'BR',
'ca':'CA',
'co':'CO',
'cs':'CS',
'cy':'CY',
'da':'DA',
'de':'DE',
'dz':'DZ',
'el':'EL',
'en':'EN',
'eo':'EO',
'es':'ES',
'et':'ET',
'eu':'EU',
'fa':'FA',
'fi':'FI',
'fj':'FJ',
'fo':'FO',
'fr':'FR',
'fy':'FY',
'ga':'GA',
'gd':'GD',
'gl':'GL',
'gn':'GN',
'gu':'GU',
'ha':'HA',
'he':'HE',
'hi':'HI',
'hr':'HR',
'hu':'HU',
'hy':'HY',
'ia':'IA',
'id':'ID',
'ie':'IE',
'ik':'IK',
'is':'IS',
'it':'IT',
'iu':'IU',
'ja':'JA',
'jw':'JW',
'ka':'KA',
'kk':'KK',
'kl':'KL',
'km':'KM',
'kn':'KN',
'ko':'KO',
'ks':'KS',
'ku':'KU',
'ky':'KY',
'la':'LA',
'ln':'LN',
'lo':'LO',
'lt':'LT',
'lv':'LV',
'mg':'MG',
'mi':'MI',
'mk':'MK',
'ml':'ML',
'mn':'MN',
'mo':'MO',
'mr':'MR',
'ms':'MS',
'mt':'MT',
'my':'MY',
'na':'NA',
'ne':'NE',
'nl':'NL',
'no':'NO',
'oc':'OC',
'om':'OM',
'or':'OR',
'pa':'PA',
'pl':'PL',
'ps':'PS',
'pt':'PT',
'qu':'QU',
'rm':'RM',
'rn':'RN',
'ro':'RO',
'ru':'RU',
'rw':'RW',
'sa':'SA',
'sd':'SD',
'sg':'SG',
'sh':'SH',
'si':'SI',
'sk':'SK',
'sl':'SL',
'sm':'SM',
'sn':'SN',
'so':'SO',
'sq':'SQ',
'sr':'SR',
'ss':'SS',
'st':'ST',
'su':'SU',
'sv':'SV',
'sw':'SW',
'ta':'TA',
'te':'TE',
'tg':'TG',
'th':'TH',
'ti':'TI',
'tk':'TK',
'tl':'TL',
'tn':'TN',
'to':'TO',
'tr':'TR',
'ts':'TS',
'tt':'TT',
'tw':'TW',
'ug':'UG',
'uk':'UK',
'ur':'UR',
'uz':'UZ',
'vi':'VI',
'vo':'VO',
'wo':'WO',
'xh':'XH',
'yi':'YI',
'yo':'YO',
'za':'ZA',
'zh':'ZH',
'zu':'ZU',
}, 'Cisco-IOS-XR-infra-infra-locale-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg']),
'Locale' : {
'meta_info' : _MetaInfoClass('Locale',
False,
[
_MetaInfoClassMember('country', REFERENCE_ENUM_CLASS, 'LocaleCountry_Enum' , 'ydk.models.infra.Cisco_IOS_XR_infra_infra_locale_cfg', 'LocaleCountry_Enum',
[], [],
''' Name of country locale
''',
'country',
'Cisco-IOS-XR-infra-infra-locale-cfg', False),
_MetaInfoClassMember('language', REFERENCE_ENUM_CLASS, 'LocaleLanguage_Enum' , 'ydk.models.infra.Cisco_IOS_XR_infra_infra_locale_cfg', 'LocaleLanguage_Enum',
[], [],
''' Name of language locale
''',
'language',
'Cisco-IOS-XR-infra-infra-locale-cfg', False),
],
'Cisco-IOS-XR-infra-infra-locale-cfg',
'locale',
_yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg'],
'ydk.models.infra.Cisco_IOS_XR_infra_infra_locale_cfg'
),
},
}
| [
"[email protected]"
] | |
c436b52eec4081f8bf526d9d4795f98aa8cf4ae2 | 132826d3f9d0a68d70ec9ba411846bbf3695140d | /scripts/sklearn_classifier.py | 53ff3015511ebcee5e6fdc9dbd968284bcb8293c | [
"MIT"
] | permissive | deep-spin/spec | c7f9a4eae08ec7d6a422b7d9f21e52980c836312 | 23db7a559e09ff7f63ede06b04cad226432b90db | refs/heads/master | 2023-03-03T22:53:49.647064 | 2021-02-18T05:01:35 | 2021-02-18T05:01:35 | 312,660,674 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,704 | py | import argparse
import numpy as np
from sklearn.feature_extraction.text import (TfidfVectorizer, CountVectorizer,
HashingVectorizer)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import accuracy_score, matthews_corrcoef, \
precision_recall_fscore_support
from spec.dataset.corpora import available_corpora
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="sklearn classifier")
parser.add_argument("--corpus",
type=str,
choices=list(available_corpora.keys()),
default='sst',
help="corpus type",
required=True)
parser.add_argument("--train-path",
type=str,
default=None,
help="path to the train corpus",
required=True)
parser.add_argument("--test-path",
type=str,
default=None,
help="path to the test corpus",
required=True)
parser.add_argument("--feature",
type=str,
default="bow",
choices=['bow', 'tfidf', 'hash'],
help="features format")
args = parser.parse_args()
seed = 42
    np.random.seed(seed)
print('Reading train data...')
corpus_cls = available_corpora[args.corpus]
fields_tuples = corpus_cls.create_fields_tuples()
fields_dict = dict(fields_tuples)
corpus = corpus_cls(fields_tuples, lazy=True)
examples = corpus.read(args.train_path)
x_train, y_train = [], []
for ex in examples:
y_train.extend(ex.target)
text = ' '.join(ex.words)
if args.corpus == 'snli':
text = text + ' ' + ' '.join(ex.words_hyp)
x_train.append(text)
corpus.close()
y_train = np.array(y_train)
print('Vectorizing train data...')
if args.feature == 'bow':
vectorizer = CountVectorizer(lowercase=False)
features_train = vectorizer.fit_transform(x_train)
    elif args.feature == 'tfidf':
vectorizer = TfidfVectorizer(lowercase=False)
features_train = vectorizer.fit_transform(x_train)
else:
vectorizer = HashingVectorizer(lowercase=False, n_features=2000)
features_train = vectorizer.fit_transform(x_train)
print('Training...')
# classifier_linear = LogisticRegression(
# C=1000,
# max_iter=1000,
# solver='lbfgs',
# multi_class='multinomial',
# penalty='l2',
# random_state=seed,
# n_jobs=2
# )
classifier_linear = SGDClassifier(
max_iter=50,
alpha=0.00001, # 0.0001
eta0=0.001, # not used for learning_rate=`optimal`
learning_rate='constant',
loss='hinge',
penalty='l2',
shuffle=True,
random_state=seed,
n_jobs=8,
verbose=1
)
classifier_linear.fit(features_train, y_train)
print('Reading test data...')
corpus = corpus_cls(fields_tuples, lazy=True)
examples = corpus.read(args.test_path)
x_test, y_test = [], []
for ex in examples:
y_test.extend(ex.target)
text = ' '.join(ex.words)
if args.corpus == 'snli':
text = text + ' ' + ' '.join(ex.words_hyp)
x_test.append(text)
corpus.close()
y_test = np.array(y_test)
print('Vectorizing test data...')
features_test = vectorizer.transform(x_test)
print('Predicting...')
y_train_pred = classifier_linear.predict(features_train)
y_test_pred = classifier_linear.predict(features_test)
print('Train')
print('-----')
acc = accuracy_score(y_train, y_train_pred)
mcc = matthews_corrcoef(y_train, y_train_pred)
prec, rec, f1, _ = precision_recall_fscore_support(y_train, y_train_pred,
average='macro')
print('Acc: {:.4f}'.format(acc))
print('Prec: {:.4f}'.format(prec))
print('Rec: {:.4f}'.format(rec))
print('F1: {:.4f}'.format(f1))
print('MCC: {:.4f}'.format(mcc))
print('Test')
print('-----')
acc = accuracy_score(y_test, y_test_pred)
mcc = matthews_corrcoef(y_test, y_test_pred)
prec, rec, f1, _ = precision_recall_fscore_support(y_test, y_test_pred,
average='macro')
print('Acc: {:.4f}'.format(acc))
print('Prec: {:.4f}'.format(prec))
print('Rec: {:.4f}'.format(rec))
print('F1: {:.4f}'.format(f1))
print('MCC: {:.4f}'.format(mcc))
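# Example invocation (a sketch; the corpus name and data paths are illustrative
# and depend on the local installation of the spec package):
# python sklearn_classifier.py --corpus sst --train-path data/sst/train.txt \
#     --test-path data/sst/test.txt --feature tfidf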
| [
"[email protected]"
] | |
9db96abccd5b62b41daa94a3a729d2e868c9c8b7 | 3efca607aefbd6cf558517bae689ccdacb7b383e | /contrib/devtools/symbol-check.py | 5fdf31de16afe2e2d14e6880f9d5cb158835dd4b | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | MicroBitcoinOrg/MicroBitcoin | f761b2ff04bdcb650d7c0ddbef431ef95cd69541 | 0119e8eff44ec4d94313eaa30022a97692b71143 | refs/heads/snapshot | 2022-12-27T10:04:21.040945 | 2021-02-09T05:51:45 | 2021-02-09T05:51:45 | 132,959,214 | 21 | 33 | MIT | 2020-06-12T04:38:45 | 2018-05-10T22:07:51 | C++ | UTF-8 | Python | false | false | 6,136 | py | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '_init', '__bss_start', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# microd and micro-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libanl.so.1', # DNS resolve
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
# micro-qt only
'libX11-xcb.so.1', # part of X11
'libX11.so.6', # part of X11
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.splitlines():
line = line.split()
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>2 and tokens[1] == '(NEEDED)':
            match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name))
retval = 1
sys.exit(retval)
| [
"[email protected]"
] | |
c09a0cfac0b3372a8ca32d91799975aac6a12330 | 2b0f4f3590f5407da83d179db8103803f7c75e8f | /app/migrations/0001_initial.py | 772108d2acc8d7505d61bc9d919cf0a57b15c355 | [] | no_license | caiomarinhodev/ciacimento | 6c783f169ac912ed599bcfaa6a208d5be5c7942e | cf7a6951196bc36655fe0b303e3131932ec254cf | refs/heads/master | 2023-07-07T00:25:16.101307 | 2023-02-28T00:46:12 | 2023-02-28T00:46:12 | 117,120,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,211 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-01-16 10:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('nome', models.CharField(max_length=100)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('bairro', models.CharField(blank=True, max_length=200, null=True, verbose_name='Bairro')),
('cidade', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cidade')),
('endereco', models.CharField(blank=True, max_length=200, null=True, verbose_name='Endere\xe7o')),
('numero', models.CharField(blank=True, max_length=5, null=True, verbose_name='N\xfamero')),
('complemento', models.CharField(blank=True, max_length=300, null=True, verbose_name='Ponto de Refer\xeancia')),
('lat', models.CharField(blank=True, max_length=100, null=True)),
('lng', models.CharField(blank=True, max_length=100, null=True)),
('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Telefone')),
('full_address', models.CharField(blank=True, max_length=200, null=True)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Foto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('url', models.URLField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('quantidade', models.CharField(max_length=100)),
('valor_item', models.CharField(blank=True, max_length=100, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Marca',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('nome', models.CharField(max_length=100)),
('site', models.URLField(blank=True, null=True)),
('descricao', models.TextField(blank=True, null=True)),
('foto', models.URLField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('message', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Mensagem',
'verbose_name_plural': 'Mensagens',
},
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('type_message', models.CharField(choices=[('NOVO_PEDIDO_LOJA', 'NOVO_PEDIDO_LOJA'), ('NOVO_PEDIDO_VENDEDOR', 'NOVO_PEDIDO_VENDEDOR')], max_length=100)),
('is_read', models.BooleanField(default=False)),
('to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('valor_total', models.CharField(blank=True, max_length=100, null=True)),
('valor_entrega', models.CharField(blank=True, max_length=100, null=True)),
('is_read', models.BooleanField(default=False)),
('is_completed', models.BooleanField(default=False)),
('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Cliente')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('cod', models.CharField(max_length=50)),
('nome', models.CharField(max_length=100)),
('cor', models.CharField(blank=True, max_length=100, null=True)),
('peso', models.CharField(blank=True, max_length=50, null=True)),
('valor', models.CharField(blank=True, max_length=10, null=True)),
('is_active', models.BooleanField(default=True)),
('is_oferta', models.BooleanField(default=False)),
('descricao', models.TextField(blank=True, null=True)),
('instrucoes', models.TextField(blank=True, null=True)),
('tipo_embalagem', models.CharField(blank=True, max_length=100, null=True)),
('categoria', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Categoria')),
('marca', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Marca')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Tipo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('nome', models.CharField(max_length=100)),
('categoria', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Categoria')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Vendedor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('published_at', models.DateTimeField(auto_now=True)),
('bairro', models.CharField(blank=True, max_length=200, null=True, verbose_name='Bairro')),
('cidade', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cidade')),
('endereco', models.CharField(blank=True, max_length=200, null=True, verbose_name='Endere\xe7o')),
('numero', models.CharField(blank=True, max_length=5, null=True, verbose_name='N\xfamero')),
('complemento', models.CharField(blank=True, max_length=300, null=True, verbose_name='Ponto de Refer\xeancia')),
('lat', models.CharField(blank=True, max_length=100, null=True)),
('lng', models.CharField(blank=True, max_length=100, null=True)),
('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Telefone')),
('full_address', models.CharField(blank=True, max_length=200, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='produto',
name='tipo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Tipo'),
),
migrations.AddField(
model_name='pedido',
name='vendedor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Vendedor'),
),
migrations.AddField(
model_name='item',
name='pedido',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Pedido'),
),
migrations.AddField(
model_name='item',
name='produto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Produto'),
),
migrations.AddField(
model_name='foto',
name='produto',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Produto'),
),
]
| [
"[email protected]"
] | |
947a32e74b228a2939732969ff1b57e7dc5e68cc | 364edc98a05ddecf5ad7b7614d2a35a95d19705b | /os_bitcoin/Bithumb_20170412_RESTFulAPI-python3/xcoin_api_client.py | bccc8fe928a3987f5073f8daa14df026d9ad127e | [] | no_license | as950118/outsource | f7f10b5ba62487da8ccddd894aaedc8af48e9d50 | 05a9f654aa222f4da4ce9c4902dde094c9d158d0 | refs/heads/master | 2022-12-21T00:18:45.405708 | 2020-02-03T15:53:16 | 2020-02-03T15:53:16 | 193,331,277 | 0 | 0 | null | 2022-12-06T22:38:00 | 2019-06-23T09:50:33 | HTML | UTF-8 | Python | false | false | 2,400 | py | #
# XCoin API-call related functions
#
# @author btckorea
# @date 2017-04-12
#
# Compatible with python3 version.
import sys
import time
import math
import base64
import hmac, hashlib
import urllib.parse
import pycurl
import json
import certifi
class XCoinAPI:
api_url = "https://api.bithumb.com";
api_key = "b023e33a524e648f08431825769db6d9";
api_secret = "88ea704e6800aa9e77e2edb8385a32a7";
def __init__(self, api_key, api_secret):
self.api_key = api_key;
self.api_secret = api_secret;
def body_callback(self, buf):
self.contents = buf;
def microtime(self, get_as_float = False):
if get_as_float:
return time.time()
else:
return '%f %d' % math.modf(time.time())
def usecTime(self) :
mt = self.microtime(False)
mt_array = mt.split(" ")[:2];
return mt_array[1] + mt_array[0][2:5];
def xcoinApiCall(self, endpoint, rgParams):
# 1. Api-Sign and Api-Nonce information generation.
# 2. Request related information from the Bithumb API server.
#
# - nonce: it is an arbitrary number that may only be used once.
# - api_sign: API signature information created in various combinations values.
endpoint_item_array = {
"endpoint" : endpoint
};
uri_array = dict(endpoint_item_array, **rgParams); # Concatenate the two arrays.
str_data = urllib.parse.urlencode(uri_array);
nonce = self.usecTime();
data = endpoint + chr(0) + str_data + chr(0) + nonce;
utf8_data = data.encode('utf-8');
key = self.api_secret;
utf8_key = key.encode('utf-8');
h = hmac.new(bytes(utf8_key), utf8_data, hashlib.sha512);
hex_output = h.hexdigest();
utf8_hex_output = hex_output.encode('utf-8');
api_sign = base64.b64encode(utf8_hex_output);
utf8_api_sign = api_sign.decode('utf-8');
curl_handle = pycurl.Curl();
curl_handle.setopt(pycurl.POST, 1);
#curl_handle.setopt(pycurl.VERBOSE, 1); # vervose mode :: 1 => True, 0 => False
curl_handle.setopt(pycurl.POSTFIELDS, str_data);
url = self.api_url + endpoint;
curl_handle.setopt(curl_handle.URL, url);
curl_handle.setopt(curl_handle.HTTPHEADER, ['Api-Key: ' + self.api_key, 'Api-Sign: ' + utf8_api_sign, 'Api-Nonce: ' + nonce]);
curl_handle.setopt(curl_handle.WRITEFUNCTION, self.body_callback);
curl_handle.perform();
#response_code = curl_handle.getinfo(pycurl.RESPONSE_CODE); # Get http response status code.
curl_handle.close();
return (json.loads(self.contents));
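# Minimal usage sketch (assumes valid Bithumb credentials; the endpoint and
# parameter names follow Bithumb's public REST documentation and are not
# verified here):
# api = XCoinAPI(api_key="your-key", api_secret="your-secret")
# result = api.xcoinApiCall("/info/balance", {"currency": "BTC"})
# print(result["status"])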
| [
"[email protected]"
] | |
e250fcd488a4c2615d7fe6db55c69dfc2945a987 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dws/huaweicloudsdkdws/v2/model/switch_over_cluster_request.py | e198efd2d141632f65d73c68f05ebd92d2babbf9 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,122 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SwitchOverClusterRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'cluster_id': 'str'
}
attribute_map = {
'cluster_id': 'cluster_id'
}
def __init__(self, cluster_id=None):
"""SwitchOverClusterRequest
The model defined in huaweicloud sdk
:param cluster_id: 集群的ID。
:type cluster_id: str
"""
self._cluster_id = None
self.discriminator = None
self.cluster_id = cluster_id
@property
def cluster_id(self):
"""Gets the cluster_id of this SwitchOverClusterRequest.
集群的ID。
:return: The cluster_id of this SwitchOverClusterRequest.
:rtype: str
"""
return self._cluster_id
@cluster_id.setter
def cluster_id(self, cluster_id):
"""Sets the cluster_id of this SwitchOverClusterRequest.
集群的ID。
:param cluster_id: The cluster_id of this SwitchOverClusterRequest.
:type cluster_id: str
"""
self._cluster_id = cluster_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SwitchOverClusterRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
9bace68bb00dea446e490b5150b8ea921d0e8499 | 94a4388cee6dfeaa1674fba20423e8a3f8f6dd42 | /backend/users/migrations/0003_user_group.py | 33f315c63bb0addf839fdc3d8a692489f8be205a | [] | no_license | crowdbotics-apps/game-mdoe-18873 | 0dc5c1e1827f382d5a84847697d0b1b05191066d | 8bcbe6c9b116fa1294b8104018c9cd36b1536c13 | refs/heads/master | 2022-11-15T17:29:36.126851 | 2020-07-13T22:23:28 | 2020-07-13T22:23:28 | 279,423,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Generated by Django 2.2.14 on 2020-07-13 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0002_auto_20200713_2221'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
]
| [
"[email protected]"
] | |
3381f548bbfad75fbeb08e50c0769df83dd078b6 | a38b27d0eac787ca453c0ef06f1a819305b2f648 | /varappx/main/view_tools/main_views_tools.py | 6368bb5d40f33d9ebd93088403443fe118f06682 | [
"MIT"
] | permissive | 444thLiao/VarappX-flask | 9417b84167c31276c9342b23ab56cbcc4b71fa1f | 9a59f4eb5897a1ecf90978e9f357954cdd7d410a | refs/heads/master | 2021-01-19T05:00:05.575501 | 2017-05-16T07:04:59 | 2017-05-16T07:04:59 | 87,406,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | # import logging
# import re
# from varapp.samples.samples_factory import *
#
#
# def samples_selection_from_request(request, db, from_ped=True):
# """Parse a GET request to make the samples groups and return a SamplesSelection.
# :param from_ped: read groups info based on 'phenotype' attribute in the Samples table.
# """
# groups = {}
# sample_requests = request.GET.getlist('samples',[])
# samples = samples_selection_factory(db).sort('sample_id') # a SamplesCollection
# if not sample_requests:
# if from_ped:
# groups = fetch_ped_info_groups(samples)
# else:
# groups = {}
# return SamplesSelection(samples, groups, db=db)
# elif all(x == '' for x in sample_requests):
# return SamplesSelection(samples, {}, db=db)
# else:
# for sr in sample_requests:
# m = re.match(r"(\S+?)=(\S+)", sr)
# if not m:
# raise ValueError("Wrong samples request (expected '<group>=<samples list>', got '{}').".format(sr))
# gname,snames = m.groups()
# snames = snames.split(',')
# group = samples.get_list(snames)
# if len(group) != len(snames):
# raise ValueError("Unknown samples: {}".format(
# set(snames) - set([s.name for s in group])))
# groups[gname] = [s.name for s in group]
# return SamplesSelection(samples, groups, db=db)
| [
"[email protected]"
] | |
ae7f12952f1b16f9a54b0c881fa92120c72709c2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /FJk4mJwRk2TYZhkeQ_16.py | 9e35fdec6941c741373d0760b5d1ed5dc67a750f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py |
def accum(txt):
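    # e.g. accum("abcd") -> "A-Bb-Ccc-Dddd"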
return '-'.join([(n * ch).title() for n, ch in enumerate(txt, 1)])
| [
"[email protected]"
] | |
b675b567f1ca8b19f71f13a481b2c1f48fed4a7a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02991/s309121406.py | e6af09084c5db2471cde62c6c8b5d88d68d8a2e5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import sys
input = sys.stdin.readline
N,M = map(int,input().split())
UV = [tuple(map(int,input().split())) for i in range(M)]
S,T = map(int,input().split())
S,T = S-1,T-1
es = [[] for _ in range(N*3)]
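# each vertex is tripled into layers 0/1/2 (see the edge wiring below); a walk
# returns to layer 0 only after a multiple of 3 edges, i.e. after whole
# "ken-ken-pa" hops, which is why the answer divides the BFS distance by 3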
for u,v in UV:
u,v = u-1,v-1
es[u].append(v+N)
es[u+N].append(v+N+N)
es[u+N+N].append(v)
from collections import deque
q = deque([S])
INF = float('inf')
dist = [INF] * (N*3)
dist[S] = 0
while q:
v = q.popleft()
for to in es[v]:
if dist[to] <= dist[v]+1: continue
dist[to] = dist[v]+1
q.append(to)
if to==T:
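            # dist counts single edges; one ken-ken-pa consumes exactly 3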
print(dist[to] // 3)
exit()
print(-1) | [
"[email protected]"
] | |
deb97f79158dad95e29030e083d12abf5eb6d1d4 | 1eaf69357dfca66e4dc6651da2b93db8665164f9 | /2022/06/test_code.py | 11832d889e5e26ee9deaddc92f34934c444b4cd3 | [
"MIT"
] | permissive | Akumatic/Advent-of-Code | deb89b9a5302999ffb344766bb3f1b0dd5272445 | 5377d8d653092246a7a35c7fa2a3e22cc74ebb0b | refs/heads/master | 2022-12-21T20:32:05.978675 | 2022-12-16T14:41:23 | 2022-12-16T14:41:23 | 221,700,755 | 24 | 13 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # SPDX-License-Identifier: MIT
# Copyright (c) 2022 Akumatic
from code import part1, part2
def test():
inputs = ["mjqjpqmgbljsphdztnvjfqwrcgsmlb", "bvwbjplbgvbhsrlpgdmjqwftvncz",
"nppdvjthqldpwncqszvftbrmjlhg", "nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg", "zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw"]
expected_values = [(7, 19), (5, 23), (6, 23), (10, 29), (11, 26)]
for i in range(len(inputs)):
assert part1(inputs[i]) == expected_values[i][0]
print("Passed Part 1")
for i in range(len(inputs)):
assert part2(inputs[i]) == expected_values[i][1]
print("Passed Part 2")
if __name__ == "__main__":
test()
| [
"[email protected]"
] | |
0ee4b1f629f10089a33bd6119d6964d0b041ed3d | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-alidns/aliyunsdkalidns/request/v20150109/DescribeDohSubDomainStatisticsSummaryRequest.py | f21d8c0c0b01b28d6156cd3ef3b28b308f708744 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,588 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class DescribeDohSubDomainStatisticsSummaryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'DescribeDohSubDomainStatisticsSummary','alidns')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_StartDate(self): # String
return self.get_query_params().get('StartDate')
def set_StartDate(self, StartDate): # String
self.add_query_param('StartDate', StartDate)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_EndDate(self): # String
return self.get_query_params().get('EndDate')
def set_EndDate(self, EndDate): # String
self.add_query_param('EndDate', EndDate)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_SubDomain(self): # String
return self.get_query_params().get('SubDomain')
def set_SubDomain(self, SubDomain): # String
self.add_query_param('SubDomain', SubDomain)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
| [
"[email protected]"
] | |
8384c58d74daedb0e579241902fe89914974b152 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_shibboleths.py | 479b492df20a757071eeea8dcfd92915bc99a468 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py |
from xai.brain.wordbase.nouns._shibboleth import _SHIBBOLETH
#calss header
class _SHIBBOLETHS(_SHIBBOLETH, ):
def __init__(self,):
_SHIBBOLETH.__init__(self)
self.name = "SHIBBOLETHS"
self.specie = 'nouns'
self.basic = "shibboleth"
self.jsondata = {}
| [
"[email protected]"
] | |
a0a636dd1aaf47c3b8b20d61ef35980706c7cc74 | 39e03684081b27311385a0ab31afcc2e09883e5c | /tools/train_saliency.py | 99a78842aca4431f7cdfd83729401f8bcf65db99 | [
"MIT",
"Python-2.0"
] | permissive | witnessai/MMSceneGraph | 8d0b2011a946ddcced95fbe15445b7f4da818509 | bc5e0f3385205404c712ae9f702a61a3191da0a1 | refs/heads/master | 2023-08-12T06:54:00.551237 | 2021-10-12T03:04:21 | 2021-10-12T03:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,333 | py | from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_saliency_detector
from mmdet.utils import collect_env, get_root_logger
import yaml
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
# this is not allowed!
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu_ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = list(range(1)) if args.gpus is None else list(range(args.gpus))
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
# For OD: bs=16 -- lr=0.001
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) * cfg.data.imgs_per_gpu / 16
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([('{}: {}'.format(k, v))
for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_saliency_detector(cfg.model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text)
mmcv.dump(yaml.safe_load(cfg.dump()), osp.join(cfg.work_dir, 'cfg.yaml'))
# you can reuse the detector training script.
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
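# Example invocation (a sketch; the config path and GPU count are illustrative):
# python tools/train_saliency.py configs/saliency/example_config.py --gpus 1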
| [
"[email protected]"
] | |
eb6e327944cbc63a28f6744e79a24841cf289c7b | a7ded5d3d19a98e61a44189cffe3703f7938e0db | /xero_python/payrolluk/models/leave_type_object.py | a3951dbe79d8171aed1d4532a1909c00f97b9355 | [
"MIT"
] | permissive | liseekeralbert/xero-python | dfd1076344f763d74f81f701e32600cf88bcc7b2 | d27ab1894ecd84d2a9af0ca91583593756b21ab3 | refs/heads/master | 2022-12-16T07:41:14.331308 | 2020-09-18T17:12:35 | 2020-09-18T17:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class LeaveTypeObject(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"pagination": "Pagination",
"problem": "Problem",
"leave_type": "LeaveType",
}
attribute_map = {
"pagination": "pagination",
"problem": "problem",
"leave_type": "leaveType",
}
def __init__(self, pagination=None, problem=None, leave_type=None): # noqa: E501
"""LeaveTypeObject - a model defined in OpenAPI""" # noqa: E501
self._pagination = None
self._problem = None
self._leave_type = None
self.discriminator = None
if pagination is not None:
self.pagination = pagination
if problem is not None:
self.problem = problem
if leave_type is not None:
self.leave_type = leave_type
@property
def pagination(self):
"""Gets the pagination of this LeaveTypeObject. # noqa: E501
:return: The pagination of this LeaveTypeObject. # noqa: E501
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this LeaveTypeObject.
:param pagination: The pagination of this LeaveTypeObject. # noqa: E501
:type: Pagination
"""
self._pagination = pagination
@property
def problem(self):
"""Gets the problem of this LeaveTypeObject. # noqa: E501
:return: The problem of this LeaveTypeObject. # noqa: E501
:rtype: Problem
"""
return self._problem
@problem.setter
def problem(self, problem):
"""Sets the problem of this LeaveTypeObject.
:param problem: The problem of this LeaveTypeObject. # noqa: E501
:type: Problem
"""
self._problem = problem
@property
def leave_type(self):
"""Gets the leave_type of this LeaveTypeObject. # noqa: E501
:return: The leave_type of this LeaveTypeObject. # noqa: E501
:rtype: LeaveType
"""
return self._leave_type
@leave_type.setter
def leave_type(self, leave_type):
"""Sets the leave_type of this LeaveTypeObject.
:param leave_type: The leave_type of this LeaveTypeObject. # noqa: E501
:type: LeaveType
"""
self._leave_type = leave_type
| [
"[email protected]"
] | |
8f436fd55a4a3cb924fcf951eecdd1aec41559a7 | efa87ca56d83e2e40015c4578c8774f5606d09f3 | /src/part_2.py | ea042d2b3dfabf37cc7e43cfed447537919e4180 | [] | no_license | dev2033/vk_api_pars_group | ea4eed70b5dabedf41a1f18252094314bba45593 | f6589d4447a3b45578f18d42966e6a659910b1b6 | refs/heads/master | 2023-02-03T12:16:20.105976 | 2020-12-26T20:36:18 | 2020-12-26T20:36:18 | 324,630,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,311 | py | import json
import youtube_dl
import requests
import os
from auth_data import token
def get_wall_posts(group_name):
"""
Функция для парсинга групп в социальной сети ВК,
Если в url - не указывать domain=, а сразу после ?=,
то будет парситься ваша стена в ВК, все посты и фото
"""
url = f"https://api.vk.com/method/wall.get?domain={group_name}&count=40&access_token={token}&v=5.52"
req = requests.get(url)
src = req.json()
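    # Expected response shape (VK API v5.52), which the code below indexes into:
    #   {"response": {"items": [{"id": ..., "attachments": [...]}, ...]}}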
    # check whether the output directory already exists
if os.path.exists(f"{group_name}"):
print(f"Дирректория с именем {group_name} уже существует")
else:
os.mkdir(group_name)
    # dump the raw response to a JSON file so the structure can be inspected
with open(f"{group_name}/{group_name}.json", "w", encoding="utf-8") as file:
json.dump(src, file, indent=4, ensure_ascii=False)
    # collect the IDs of the fetched posts
    posts = src["response"]["items"]
    fresh_posts_id = [post["id"] for post in posts]
"""
Проверка, если файл не существует, значит это первый
парсинг группы(отправляем все новые посты). Иначе начинаем
проверку и отправляем только новые посты
"""
    if not os.path.exists(f"{group_name}/exist_posts_{group_name}.txt"):
        print("ID file does not exist, creating a new one")
        with open(f"{group_name}/exist_posts_{group_name}.txt", "w") as file:
for item in fresh_posts_id:
file.write(str(item) + "\n")
for post in posts:
def download_img(url, post_id, group_name):
"""Сохраняет фото в дирректорию с проектом"""
res = requests.get(url)
# создаем папку group_name/files
if not os.path.exists(f"{group_name}/files"):
os.mkdir(f"{group_name}/files")
with open(f"{group_name}/files/{post_id}.jpg", "wb") as img_file:
img_file.write(res.content)
def download_video(url, post_id, group_name):
"""Сохраняет видео в дирректорию с проектом"""
# создаем папку group_name/files
if not os.path.exists(f"{group_name}/video_files"):
os.mkdir(f"{group_name}/video_files")
try:
ydl_opts = {"outtmpl": f"{group_name}/video_files/{post_id}"}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
video_info = ydl.extract_info(url, download=False)
video_duration = video_info["duration"]
if video_duration > 300:
print("Видео слишком долгое")
else:
                        print(
                            f"Video lasts {video_duration} "
                            f"seconds. Downloading it")
ydl.download([url])
except Exception:
print("Не удалось скачать видео...")
post_id = post["id"]
print(f"Отправляем пост с ID {post_id}")
try:
if "attachments" in post:
post = post["attachments"]
photo_quality = [
"photo_2560",
"photo_1280",
"photo_807",
"photo_604",
"photo_130",
"photo_75"
]
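                # sizes are listed from largest to smallest; the first key
                # present on the photo object is the one downloaded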
                # one attachment vs. several photos/videos in the post
if len(post) == 1:
                    # grab the photo from the post
if post[0]["type"] == "photo":
for pq in photo_quality:
if pq in post[0]["photo"]:
post_photo = post[0]["photo"][pq]
print(f"Фото с расширеним {pq}")
print(post_photo)
download_img(
post_photo,
post_id,
group_name
)
break
                # grab the video from the post
elif post[0]["type"] == "video":
print("Видео пост")
                    # build a video.get request to obtain the
                    # player URL for this video
video_access_key = post[0]["video"]["access_key"]
video_post_id = post[0]["video"]["id"]
video_owner_id = post[0]["video"]["owner_id"]
video_get_url = f"https://api.vk.com/method/" \
f"video.get?" \
f"videos={video_owner_id}_" \
f"{video_post_id}_" \
f"{video_access_key}&" \
f"access_token={token}&v=5.52"
req = requests.get(video_get_url)
res = req.json()
video_url = res["response"]["items"][0]["player"]
print(video_url)
download_video(video_url, post_id, group_name)
else:
print("Либо линк, либо аудио, либо репост...")
else:
photo_post_count = 0
for post_item_photo in post:
if post_item_photo["type"] == "photo":
for pq in photo_quality:
if pq in post_item_photo['photo']:
post_photo = post_item_photo["photo"][pq]
print(f"Фото с расширением {pq}")
print(post_photo)
post_id_counter = str(post_id) + f"_{photo_post_count}"
download_img(post_photo, post_id_counter, group_name)
photo_post_count += 1
break
                    # grab the video
elif post_item_photo["type"] == "video":
print("Видео пост")
                        # build a video.get request to obtain the
                        # player URL for this video
video_access_key = post_item_photo["video"][
"access_key"]
video_post_id = post_item_photo["video"]["id"]
video_owner_id = post_item_photo["video"][
"owner_id"]
video_get_url = f"https://api.vk.com/method/video.get?videos={video_owner_id}_{video_post_id}_{video_access_key}&access_token={token}&v=5.52"
req = requests.get(video_get_url)
res = req.json()
video_url = res["response"]["items"][0]["player"]
print(video_url)
post_id_counter = str(post_id) + f"_{photo_post_count}"
download_video(video_url, post_id_counter, group_name)
photo_post_count += 1
else:
print("Либо линк, либо аудио, либо репост...")
except Exception:
print(f"Что-то пошло не так с постом ID {post_id}")
else:
print("Файл с ID постов найден, начинаем выборку свежих постов!")
def main():
    group_name = input("Enter the group name: ")
get_wall_posts(group_name)
# os.system('cls||clear')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
beb0e5f57a3463005136baba1b8c7c3c9e533db5 | e9f598d4e2e53bdffd95e2b91726eed156a9bd25 | /we_chat_tou.py | 50911f89aa338c6ccce63b1f6e0df506ceb609c7 | [] | no_license | z991/play_pillow | e2d1b17ccdc7e3b5aacb0d1dc7b04805699942f8 | e01f99be129a5790c78700cb09b1fa85825f832a | refs/heads/master | 2020-11-27T12:20:51.237125 | 2019-12-21T14:19:48 | 2019-12-21T14:19:48 | 229,437,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | import os
import math
from wxpy import Bot
from PIL import Image
# create the folder that stores the avatars
def creat_filepath():
    """
    Create the avatar folder inside the current working directory
    (e.g. /Users/zhuxuanyu/python_xuexi/pil_play) and return its path.
    """
avatar_dir = os.getcwd() + "/wechat/"
if not os.path.exists(avatar_dir):
os.mkdir(avatar_dir)
return avatar_dir
# save the friends' avatars
def save_avatar(avatar_dir):
    # initialize the bot and log in by scanning the QR code
bot = Bot()
friends = bot.friends(update=True)
num = 0
for friend in friends:
friend.get_avatar(avatar_dir + '/' + str(num) + ".jpg")
num = num + 1
# stitch the avatars into one image
def joint_avatar(path):
    # canvas size (square)
image_size = 2560
    # number of avatar files in the folder
length = len(os.listdir(path))
"""
计算所需各行列的头像数量
"""
# sqrt 开平方
x_lines = math.ceil(math.sqrt(length)) # 图片x 轴放多少个好友头像
y_lines = math.ceil(math.sqrt(length)) # 图片y 轴放多少个好友头像
# 设置每个头像大小 拼接后图片长度 / x(y)轴 好友头像的个数
each_size = math.ceil(image_size / math.floor(math.sqrt(length)))
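    # e.g. 100 avatars: x_lines = y_lines = 10 and each_size = 2560 / 10 = 256,
    # so the canvas is exactly 2560 x 2560; for a non-square count (say 90),
    # ceil(sqrt) and floor(sqrt) differ, and the canvas grows slightly larger.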
image = Image.new('RGB', (each_size * x_lines, each_size * y_lines))
    x = 0  # current column index
    y = 0  # current row index
for (root, dirs, files) in os.walk(path):
for pic_name in files:
            # handle avatars that fail to load
try:
with Image.open(path + pic_name) as img:
"""
resize(self, size, resample=NEAREST, box=None):
返回此图像的大小调整后的副本。
"""
img = img.resize((each_size, each_size))
"""
paste(self, im, box=None, mask=None):
将另一个图像粘贴到此图像中。box参数要么是
一个2元组给出左上角,一个4元组定义
左,上,右,和低像素坐标,或没有(相同的
(0,0))。看到裁判:“坐标系”。如果给定了一个4元组,则给出其大小
所粘贴图像的大小必须与所粘贴区域的大小相匹配。
"""
image.paste(img, (x * each_size, y * each_size))
x += 1
if x == x_lines:
x = 0
y += 1
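                        # avatars fill left-to-right, wrapping to the next row
                        # once a full row of x_lines tiles has been placed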
except IOError:
print("头像读取失败")
    image.save(os.getcwd() + "/wechat.png")
    print('WeChat friends avatar collage finished!')
if __name__ == '__main__':
avatar_dir = creat_filepath()
save_avatar(avatar_dir)
joint_avatar(avatar_dir)
# for (root, dirs, files) in os.walk(avatar_dir):
# print(root, dirs, files)
| [
"[email protected]"
] |