blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96e74a51787d9206d2e4ddd5c9531473c08384c5
|
593dff0c5746603268417a702a00cd3355f47f3a
|
/hq_extracter.py
|
0637fa456196047256f299fb62689b0330057cc7
|
[] |
no_license
|
vc2309/Blue-sky-tools
|
e508f2cb0fd240a95b812ed53f2ac6ed3ea1cd64
|
055b06cc9865808f3d0665dc9c95aba6b401fe69
|
refs/heads/master
| 2021-09-10T18:55:52.616954 | 2018-03-31T07:14:01 | 2018-03-31T07:14:01 | 104,192,264 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
import pandas as pd
# Input report(s) to process.
ifiles = ['sjs_all_feb_report_m.csv']

# Canonical HQ floor labels as they appear in the report's "location" column.
# (No 'HQ - 4S' entry exists in the source data.)
floors = ['HQ - G/F', 'HQ - 2S', 'HQ - 2N', 'HQ - 3S', 'HQ - 3N',
          'HQ - 4N', 'HQ - 5S', 'HQ - 5N', 'HQ - 6S', 'HQ - 6N',
          'HQ - 7S', 'HQ - 7N', 'HQ - 8S', 'HQ - 8N', 'HQ - 9S',
          'HQ - 9N', 'HQ - AC', 'HQ - 11', 'HQ - 12', 'HQ - 13',
          'HQ - Lift', 'HQ - 10']


def extract_hq(file, out_file='hq_jan.csv'):
    """Extract the HQ-floor rows from a report CSV and write them to a CSV.

    Parameters
    ----------
    file : str
        Path of the input report CSV; must contain a 'location' column.
    out_file : str
        Destination CSV path. Defaults to 'hq_jan.csv' for backward
        compatibility (the original hard-coded name, mislabelled "jan"
        even though the configured input is a February report).
    """
    print("here")
    df = pd.read_csv(file)
    # Select floor by floor so output rows stay grouped in `floors` order,
    # matching the original append-loop behaviour.
    hq_df = pd.concat([df[df['location'] == f] for f in floors])
    print(hq_df.head())
    hq_df.to_csv(out_file)
def main():
    """Run the HQ extraction over every configured input report."""
    for input_path in ifiles:
        extract_hq(input_path)


if __name__ == '__main__':
    print("ok")
    main()
|
[
"[email protected]"
] | |
1c90deae299ed6a990528539c555580748edee2a
|
bc441bb06b8948288f110af63feda4e798f30225
|
/tuna_service_sdk/model/pipeline/build_pb2.pyi
|
b2d4c34548e7bc31341d04a0ced2cc56bb0cfe4a
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,876 |
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from tuna_service_sdk.model.pipeline.build_status_pb2 import (
BuildStatus as tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus,
)
from tuna_service_sdk.model.pipeline.git_meta_pb2 import (
GitMeta as tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases preserving the real builtins, since the generated stub below
# shadows names like `id` at class scope.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
# Python 2 only: `buffer` and `unicode` do not exist on Python 3.
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode
# Generated mypy stub for the pipeline Build protobuf message — do not edit
# by hand; regenerate with generate_proto_mypy_stubs.py instead.
class Build(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...

    # Nested message: metadata about the package/version the build produced.
    class Artifact(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        # Scalar string fields of the Artifact message.
        packageName = ... # type: typing___Text
        versionName = ... # type: typing___Text
        ctime = ... # type: typing___Text
        packageId = ... # type: typing___Text
        versionId = ... # type: typing___Text
        def __init__(self,
            *,
            packageName : typing___Optional[typing___Text] = None,
            versionName : typing___Optional[typing___Text] = None,
            ctime : typing___Optional[typing___Text] = None,
            packageId : typing___Optional[typing___Text] = None,
            versionId : typing___Optional[typing___Text] = None,
            ) -> None: ...
        # Byte-string parsing accepts py2 buffer/unicode on Python 2 only.
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> Build.Artifact: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build.Artifact: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"ctime",b"ctime",u"packageId",b"packageId",u"packageName",b"packageName",u"versionId",b"versionId",u"versionName",b"versionName"]) -> None: ...

    # Scalar fields of the Build message.
    id = ... # type: typing___Text
    sender = ... # type: typing___Text
    created = ... # type: builtin___int
    yaml_string = ... # type: typing___Text
    number = ... # type: typing___Text
    events = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
    # Sub-message fields are exposed as read-only properties.
    @property
    def git_meta(self) -> tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta: ...
    @property
    def artifact(self) -> Build.Artifact: ...
    @property
    def status(self) -> tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus: ...
    def __init__(self,
        *,
        id : typing___Optional[typing___Text] = None,
        git_meta : typing___Optional[tuna_service_sdk___model___pipeline___git_meta_pb2___GitMeta] = None,
        sender : typing___Optional[typing___Text] = None,
        artifact : typing___Optional[Build.Artifact] = None,
        created : typing___Optional[builtin___int] = None,
        yaml_string : typing___Optional[typing___Text] = None,
        status : typing___Optional[tuna_service_sdk___model___pipeline___build_status_pb2___BuildStatus] = None,
        number : typing___Optional[typing___Text] = None,
        events : typing___Optional[typing___Iterable[typing___Text]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Build: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Build: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"git_meta",b"git_meta",u"status",b"status"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"artifact",b"artifact",u"created",b"created",u"events",b"events",u"git_meta",b"git_meta",u"id",b"id",u"number",b"number",u"sender",b"sender",u"status",b"status",u"yaml_string",b"yaml_string"]) -> None: ...
|
[
"[email protected]"
] | |
fc62026ad385c261dc340d5914e1490389de7b69
|
16abd82b9523f0fc7ae6df0aac11fd03e2e3d9f3
|
/boards/tests/test_views.py
|
c6631a2dcbefbde8dc9659cd11ccf5750f89b5e0
|
[] |
no_license
|
msm3858/projektforum
|
cf5255a5781f3536db56cf1b680557ca876f8221
|
c6a0abda9f147d3578e430012780bda3eb4f20b5
|
refs/heads/master
| 2021-09-10T10:03:32.962523 | 2018-03-24T06:26:18 | 2018-03-24T06:26:18 | 124,791,248 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,560 |
py
|
from django.test import TestCase
from django.urls import reverse, resolve
from ..views import home, board_topics, new_topic
from ..models import Board, Topic, Post, User
from ..forms import NewTopicForm
# Create your tests here.
#########################
# TEST HOME
#########################
class HomeTests(TestCase):
    """Tests for the home view: status, URL resolution, and board links."""

    def setUp(self):
        self.board = Board.objects.create(
            name='Django', description='Django board.')
        url = reverse('boards:home')
        self.response = self.client.get(url)

    def test_home_view_status_code(self):
        # assertEqual: the assertEquals alias is deprecated and removed in 3.12.
        self.assertEqual(self.response.status_code, 200)

    def test_home_url_resolves_home_view(self):
        view = resolve('/')
        self.assertEqual(view.func, home)

    def test_home_view_contains_link_to_topics_page(self):
        board_topics_url = reverse(
            'boards:board_topics', kwargs={'pk': self.board.pk})
        self.assertContains(
            self.response, 'href="{0}"'.format(board_topics_url))
#########################
# TEST BOARD
#########################
class BoardTopicsTests(TestCase):
    """Tests for the board_topics view: status codes and navigation links."""

    def setUp(self):
        Board.objects.create(
            name='Django', description='Django board.')

    def test_board_topics_view_success_status_code(self):
        url = reverse('boards:board_topics', kwargs={'pk': 1})
        response = self.client.get(url)
        # assertEqual: the assertEquals alias is deprecated and removed in 3.12.
        self.assertEqual(response.status_code, 200)

    def test_board_topics_view_not_found_status_code(self):
        url = reverse('boards:board_topics', kwargs={'pk': 99})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_board_topics_url_resolves_board_topics_view(self):
        view = resolve('/boards/1/')
        self.assertEqual(view.func, board_topics)

    def test_board_topics_view_contains_link_back_to_homepage(self):
        board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
        response = self.client.get(board_topics_url)
        homepage_url = reverse('boards:home')
        self.assertContains(response, 'href="{0}"'.format(homepage_url))

    def test_board_topics_view_contains_navigation_links(self):
        board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
        homepage_url = reverse('boards:home')
        new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
        response = self.client.get(board_topics_url)
        self.assertContains(response, 'href="{0}"'.format(homepage_url))
        self.assertContains(response, 'href="{0}"'.format(new_topic_url))
#########################
# TEST NEW TOPIC
#########################
class NewTopicTests(TestCase):
    """Tests for the new_topic view: status codes, CSRF, and form handling."""

    def setUp(self):
        Board.objects.create(name='Django', description='Django board.')
        User.objects.create_user(
            username='marcin', email='[email protected]', password='123')

    def test_new_topic_view_success_status_code(self):
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        # assertEqual: the assertEquals alias is deprecated and removed in 3.12.
        self.assertEqual(response.status_code, 200)

    # Renamed from test_new_topic_view_not_fount_status_code (typo).
    def test_new_topic_view_not_found_status_code(self):
        url = reverse('boards:new_topic', kwargs={'pk': 99})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    # Renamed: the old name said "reselves_board_topics" but it resolves new_topic.
    def test_new_topic_url_resolves_new_topic_view(self):
        view = resolve('/boards/1/new/')
        self.assertEqual(view.func, new_topic)

    def test_new_topic_view_contains_link_back_to_board_topics_view(self):
        new_topic_url = reverse('boards:new_topic', kwargs={'pk': 1})
        board_topics_url = reverse('boards:board_topics', kwargs={'pk': 1})
        response = self.client.get(new_topic_url)
        self.assertContains(response, 'href="{0}"'.format(board_topics_url))

    def test_csrf(self):
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertContains(response, 'csrfmiddlewaretoken')

    def test_new_topic_valid_post_data(self):
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        data = {
            'subject': 'Test title',
            'message': 'Lorem ipsum dolor sit amet'
        }
        self.client.post(url, data)
        self.assertTrue(Topic.objects.exists())
        self.assertTrue(Post.objects.exists())

    def test_new_topic_invalid_post_data(self):
        '''
        Invalid post data should not redirect
        The expected behaviour is to show the form again with validation errors
        '''
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        response = self.client.post(url, {})
        form = response.context.get('form')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(form.errors)

    def test_new_topic_invalid_post_data_empty_fields(self):
        '''
        Invalid post data should not redirect
        The expected behaviour is to show the form again with validation errors
        '''
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        data = {
            'subject': '',
            'message': ''
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(Topic.objects.exists())
        self.assertFalse(Post.objects.exists())

    def test_contains_form(self):
        url = reverse('boards:new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        form = response.context.get('form')
        self.assertIsInstance(form, NewTopicForm)
|
[
"="
] |
=
|
87416760e8d527e89eda7274e938fa35d0f5862c
|
ec551303265c269bf1855fe1a30fdffe9bc894b6
|
/topic12_backtrack/T37_solveSudoku/interview.py
|
aa39e66a9273588c348549634ece2fa51180ca9a
|
[] |
no_license
|
GongFuXiong/leetcode
|
27dbda7a5ced630ae2ae65e19d418ebbc65ae167
|
f831fd9603592ae5bee3679924f962a3ebce381c
|
refs/heads/master
| 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,380 |
py
|
'''
37. 解数独
编写一个程序,通过已填充的空格来解决数独问题。
一个数独的解法需遵循如下规则:
数字 1-9 在每一行只能出现一次。
数字 1-9 在每一列只能出现一次。
数字 1-9 在每一个以粗实线分隔的 3x3 宫内只能出现一次。
空白格用 '.' 表示。
'''
class Solution:
    def solveSudoku(self, board):
        """
        Do not return anything, modify board in-place instead.
        """
        # Gather the coordinates of every unfilled ('.') cell up front.
        blanks = []
        for r in range(9):
            for c in range(9):
                if board[r][c] == ".":
                    blanks.append([r, c])

        def fits(cell, digit):
            # True if placing `digit` at `cell` violates no sudoku rule.
            r, c = cell
            for k in range(9):
                # Same column (other rows) must not already hold the digit.
                if k != r and board[k][c] == digit:
                    return False
                # Same row (other columns) must not already hold the digit.
                if k != c and board[r][k] == digit:
                    return False
            # Same 3x3 box; cells sharing the row/column were checked above.
            for i in range(r // 3 * 3, r // 3 * 3 + 3):
                for j in range(c // 3 * 3, c // 3 * 3 + 3):
                    if i != r and j != c and board[i][j] == digit:
                        return False
            return True

        def backtrack(idx):
            # All blanks filled: a complete solution was found.
            if idx == len(blanks):
                return True
            r, c = blanks[idx]
            for value in range(1, 10):
                candidate = str(value)
                if fits(blanks[idx], candidate):
                    board[r][c] = candidate
                    if backtrack(idx + 1):
                        return True
                    # Undo the tentative placement before trying the next digit.
                    board[r][c] = "."
            return False

        backtrack(0)
        print(f"board:{board}")
if __name__ == "__main__":
    solution = Solution()
    # Read boards from stdin: rows separated by ';', cells by ','; an empty
    # line terminates the loop.
    while 1:
        str1 = input()
        if str1 != "":
            nums = [[c for c in s.split(",")] for s in str1.split(";")]
            print(f"nums:{nums}")
            # Bug fix: Solution has no `permute` method (that call raised
            # AttributeError). solveSudoku solves `nums` in place.
            solution.solveSudoku(nums)
            print(nums)
        else:
            break
|
[
"[email protected]"
] | |
4e1efa5fc68c6cf783d434aebf74d1157be0268f
|
6c1b28fce483c873f627104c8c58c90af54ef22a
|
/approach_3_solution_2.py
|
cbb82862b98488db1ddca6a2e88b15cc2ed1fb8c
|
[] |
no_license
|
rajkan01/hands_on_code_review
|
dc873857a7d73f75c9d2caa5bba3fa93ba56a4a2
|
ac28dabd6eb0d46345714208741ff57345f95149
|
refs/heads/master
| 2023-09-04T00:20:16.741717 | 2021-10-23T15:45:34 | 2021-10-25T10:56:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
# Approach 3 - Solution 2
from string import ascii_lowercase as lowercase_letters
def is_pangram(sentence):
    """Return True if `sentence` contains every letter a-z (case-insensitive).

    Uses a 26-bit mask: bit i is set once the i-th letter has been seen.
    """
    # 26 set bits: one per letter of the alphabet.
    all_letters = 0b11111111111111111111111111
    seen = 0
    # The original iterated with enumerate() but never used the index.
    for char in sentence:
        if char.isalpha():
            seen |= 1 << (ord(char.lower()) - ord("a"))
            # Early exit once every letter has been seen.
            if seen == all_letters:
                return True
    return seen == all_letters
# Approach 3 - Solution 2 intentionally doesn't contain any comments.
# As discussed in the course, this is a practice problem for you: apply Approach 3 - study the code of others -- to this solution.
|
[
"[email protected]"
] | |
bd29a3919e9e554eae311ed596991eb065b7db1f
|
b210903908d418d471e0df3b93c5f290ec1c05a9
|
/gluon2pytorch/gluon2pytorch.py
|
ced44d371fe483100a99ec280b38330ca6939d3d
|
[
"MIT"
] |
permissive
|
chipper1/gluon2pytorch
|
d7bcf71900172484f1e26c46ba6f051aa1e7d773
|
e0fd770a28b1a8bf4d0aa352f360bf5765e8347d
|
refs/heads/master
| 2020-04-19T07:49:38.974250 | 2019-01-22T13:17:23 | 2019-01-22T13:17:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,078 |
py
|
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import mxnet as mx
import numpy as np
# Import comverters
from .layers import CONVERTERS
# Import PyTorch model template
from .pytorch_model_template import pytorch_model_template
def eval_model(pytorch_source, pytorch_dict, module_name):
    """Exec the generated PyTorch source and return an instantiated model.

    pytorch_source: Python source text defining a class named `module_name`.
    pytorch_dict: state_dict of converted weights to load into the model.
    module_name: name of the nn.Module subclass defined in the source.
    """
    # Tricky code: these bare references pull torch/nn/F into this frame's
    # locals so the exec'd source can resolve them by name.
    torch
    nn
    F
    exec(pytorch_source)
    # Promote the freshly exec'd class to module globals, then instantiate it
    # and load the converted weights.
    globals()[module_name] = locals()[module_name]
    pytorch_model = locals()[module_name]()
    pytorch_model.load_state_dict(pytorch_dict)
    return pytorch_model
def render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name):
    """
    Render model.
    """
    # Drop empty init snippets before joining them into the template.
    kept_inits = [snippet for snippet in inits if len(snippet) > 0]
    lower_name = pytorch_module_name.lower()
    output = pytorch_model_template.format(
        module_name=pytorch_module_name,
        module_name_lower=lower_name,
        inits='\n'.join(kept_inits),
        inputs=', '.join('x' + str(idx) for idx in inputs),
        calls='\n'.join(calls),
        outputs=', '.join('x' + str(idx) for idx in outputs),
    )
    # Optionally persist the rendered source and the converted weights.
    if dst_dir is not None:
        import os
        import errno
        try:
            os.makedirs(dst_dir)
        except OSError as err:
            # Only an already-existing directory is tolerated.
            if err.errno != errno.EEXIST:
                raise
        with open(os.path.join(dst_dir, lower_name + '.py'), 'w+') as source_file:
            source_file.write(output)
        torch.save(pytorch_dict, os.path.join(dst_dir, lower_name + '.pt'))
    return output
def gluon2pytorch(net, args, dst_dir, pytorch_module_name, debug=True):
    """
    Function to convert a model.

    net: Gluon network to convert.
    args: iterable of input shapes; one dummy input is built per shape.
    dst_dir: directory for the rendered .py/.pt files, or None to skip saving.
    pytorch_module_name: class name for the generated PyTorch module.
    debug: when True, print each traced op.

    Returns the instantiated PyTorch model with converted weights loaded.
    """
    # Run a forward pass with dummy data so the net's params are initialized.
    x = [mx.nd.array(np.ones(i)) for i in args]
    x = net(*x)
    # Get network params
    params = net.collect_params()
    # Create a symbol to trace net
    # x = mx.sym.var('data')
    x = [mx.sym.var('__input__' + str(i)) for i in range(len(args))]
    sym = net(*x)
    if len(sym) > 1:
        group = mx.sym.Group(sym)
    else:
        group = sym
    # Get JSON-definition of the model
    json_model = json.loads(group.tojson())['nodes']
    # Create empty accumulators
    nodes = []
    is_skipped = []
    pytorch_dict = {}
    inits = []
    calls = []
    inputs = []
    outputs = [i[0] for i in json.loads(group.tojson())['heads']]
    last = 0
    # Trace model
    for i, node in enumerate(json_model):
        # If the node has 'null' op, it means, that it's not a real op, but only parameter
        # TODO: convert constants
        if node['op'] == 'null':
            # '__input__N' placeholders mark the graph inputs; N is parsed
            # from the suffix (len('__input__') == 9).
            if node['name'].find('__input__') == 0:
                inputs.append(int(node['name'][9:]))
            is_skipped.append(1)
            continue
        # It's not 'null'
        is_skipped.append(0)
        # Create dict with necessary node parameters.
        # NOTE(review): name[:-4] presumably strips a '_fwd' suffix -- confirm.
        op = {
            'name': node['name'][:-4],
            'type': node['op'],
        }
        print(op, node)
        if len(node['inputs']) > 0:
            # Keep real (non-parameter) predecessors plus original graph inputs.
            orginal_inputs = [i for i in np.array(node['inputs'])[:, 0] if i in inputs]
            op['inputs'] = [i for i in np.array(node['inputs'])[:, 0] if is_skipped[i] != 1 or i in orginal_inputs]
        else:
            print(json_model)
            op['inputs'] = []
        try:
            # Not all nodes have 'attrs'
            op['attrs'] = node['attrs']
        except KeyError:
            op['attrs'] = {}
        # Debug output
        if debug:
            print(op)
            print('__')
        # Append new node to list
        nodes.append(op)
        # If operation is in available convertors, convert it
        if op['type'] in CONVERTERS:
            init_str, call_str = CONVERTERS[op['type']](i, op, nodes, params, pytorch_dict)
            inits.append(init_str)
            calls.append(call_str)
        else:
            raise AttributeError('Layer isn\'t supported')
    pytorch_source = render_module(inits, calls, inputs, outputs, dst_dir, pytorch_dict, pytorch_module_name)
    return eval_model(pytorch_source, pytorch_dict, pytorch_module_name)
|
[
"[email protected]"
] | |
52f8d22f90a6a6870ff064d288a72be4c6ab50de
|
7d78a18fcb8f34cc84e9439bd19cf491e3e0ec49
|
/Code/Particle_Identification/msc-hpc/hpc-mini-1/model8.py
|
7fca90d0b9552dd533fb15cee80aeff0c4a24a33
|
[] |
no_license
|
PsycheShaman/MSc-thesis
|
62767951b67b922ce5a21cad5bdb258998b7d2ea
|
34504499df64c7d6cc7c89af9618cd58d6378e8e
|
refs/heads/master
| 2022-03-12T07:17:57.309357 | 2019-12-10T21:17:39 | 2019-12-10T21:17:39 | 151,471,442 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,794 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 18:47:05 2019
@author: gerhard
"""
# Locate the pickled feature (x) and label (y) arrays on the HPC scratch disk.
import glob
import numpy as np
#P_files = glob.glob("C:/Users/gerhard/Documents/msc-thesis-data/P_*.pkl", recursive=True)
x_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/x_*.pkl")
y_files = glob.glob("/scratch/vljchr004/1_8_to_2_2_GeV/y_*.pkl")
#x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\x_*.pkl")
#y_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\y_*.pkl")
import pickle
print("loading first x pickle........................................................................................")
with open(x_files[0], 'rb') as x_file0:
    x = pickle.load(x_file0)
print("loading first y pickle........................................................................................")
with open(y_files[0], 'rb') as y_file0:
    y = pickle.load(y_file0)
#with open(P_files[0], 'rb') as P_file0:
#    P = pickle.load(P_file0)
# Drop the leading singleton axis; assumes x is pickled as (1, n, h, w) -- TODO confirm.
x.shape = (x.shape[1],x.shape[2],x.shape[3])
print("x.shape")
print(x.shape)
print("recursively adding x pickles........................................................................................")
# Append every remaining x pickle along the sample axis.
for i in x_files[1:]:
    with open(i,'rb') as x_file:
        print(i)
        xi = pickle.load(x_file)
        xi.shape = (xi.shape[1],xi.shape[2],xi.shape[3])
        print("xi.shape")
        print(xi.shape)
        x = np.concatenate((x,xi),axis=0)
print("recursively adding y pickles........................................................................................")
# Labels are concatenated flat (axis=None).
for i in y_files[1:]:
    with open(i,'rb') as y_file:
        yi = pickle.load(y_file)
        y = np.concatenate((y,yi),axis=None)
#for i in P_files[1:]:
#    with open(i,'rb') as P_file:
#        Pi = pickle.load(P_file)
#        P = np.concatenate((P,Pi),axis=None)
#x_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/x_*.npy")
#y_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/cnn/y_*.npy")
#
#print("recursively adding x numpys........................................................................................")
#
#for i in x_files[0:]:
#    with open(i,'rb') as x_file:
#        print(i)
#        xi = np.load(x_file)
#        x = np.concatenate((x,xi),axis=0)
#
#print("recursively adding y numpys........................................................................................")
#
#for i in y_files[0:]:
#    with open(i,'rb') as y_file:
#        yi = np.load(y_file)
#        y = np.concatenate((y,yi),axis=None)
# Drop all-zero (empty) samples together with their labels.
nz = np.array([np.count_nonzero(i) for i in x])
zeros = np.where(nz==0)
x = np.delete(x,zeros,axis=0)
y = np.delete(y,zeros)
#P = np.delete(P,zeros)
# Add a trailing channel axis for Conv2D input: (n, 17, 24, 1).
x.shape = (x.shape[0],x.shape[1],x.shape[2],1)
#x.shape = (x.shape[0],x.shape[2],x.shape[1])
# NOTE(review): the message says "for lstm" but the model below is a CNN.
print("x.shape after reshape for lstm")
print(x.shape)
#GeV_range2 = np.where(P>=1.8 and P<=2.2)
#
#x = x[GeV_range2,:,:,:]
#y = y[GeV_range2]
# Balance the classes: keep only as many pions (y==0) as electrons (y==1).
electrons = np.where(y==1)
electrons = electrons[0]
pions = np.where(y==0)
pions = pions[0]
pions = pions[0:electrons.shape[0]]
x_1 = x[electrons,:,:]
x_2 = x[pions,:,:]
x = np.vstack((x_1,x_2))
y_1 = y[electrons]
y_2 = y[pions]
y = np.concatenate((y_1,y_2),axis=None)
# Scale all inputs to [0, 1] by the global maximum.
ma = np.max(x)
x = x/ma
#ma = np.amax(x,axis=2)
#
#x = np.divide(x,ma)
#check the division above before running!!!!!!!!!!!1
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=123456)
from tensorflow.keras.utils import to_categorical
# One-hot encode labels for the 2-unit softmax output layer.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, LSTM, Bidirectional, TimeDistributed
# VGG-style stack: paired conv layers of increasing width with 2x2 pooling.
model = Sequential()
model.add(Conv2D(32,(6,6),input_shape=(17,24,1),padding="same",activation="relu"))
model.add(Conv2D(64,(6,6),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64,(4,4),padding="same",activation="relu"))
model.add(Conv2D(128,(4,4),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128,(3,3),padding="same",activation="relu"))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(256,(3,3),padding="same",activation="relu"))
model.add(Conv2D(512,(3,3),padding="same",activation="relu"))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
# Dense funnel down to the 2-class softmax head.
model.add(Dense(1024,activation="relu"))
model.add(Dense(1024,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(512,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(128,activation="relu"))
model.add(Dense(64,activation="relu"))
model.add(Dense(32,activation="relu"))
model.add(Dense(2,activation="softmax"))
adam = tensorflow.keras.optimizers.Adam()
# Compile and train with Adam (the original comment said RMSprop; the code uses Adam).
model.compile(loss='binary_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
batch_size=32
epochs=50
history=model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2,
          shuffle=True)#,
          #class_weight=class_weights)
import matplotlib.pyplot as plt
# summarize history for accuracy
# NOTE(review): with tf.keras 2.x and metrics=['accuracy'] the history keys are
# 'accuracy'/'val_accuracy'; 'acc'/'val_acc' only exist on older versions -- confirm.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history1.png', bbox_inches='tight')
# summarize history for loss
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/hpc-mini/model8_history2.png', bbox_inches='tight')
# NOTE(review): Sequential.predict_proba was removed from tf.keras 2.x;
# predict() returns the same softmax probabilities -- confirm installed version.
model.probs = model.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/hpc-mini/model8_results.csv", np.array(model.probs), fmt="%s")
np.savetxt("/home/vljchr004/hpc-mini/model8_y_test.csv", np.array(y_test), fmt="%s")
model.save('/home/vljchr004/hpc-mini/model8_.h5') # creates a HDF5 file 'my_model.h5'
del model
print("<-----------------------------done------------------------------------------>")
|
[
"[email protected]"
] | |
fa852b15b22790660899f828bd2b36acf41ab473
|
2b477700384af7ceb67f97908f1bd5899d984596
|
/mxonline/second_day/mxonline/mxonline/settings.py
|
0c86916a2d658b263215bc8d182ed18fe7d4a103
|
[] |
no_license
|
ZhiqiKou/django
|
58b743f962e0f7d85b3610e9d09a0e1db32ba9bb
|
e3d35c981e6b91130472114b121b65fd7d5cacf8
|
refs/heads/master
| 2020-03-28T20:44:56.286125 | 2018-09-07T02:21:29 | 2018-09-07T02:21:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,544 |
py
|
"""
Django settings for mxonline project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import sys
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make apps that live under BASE_DIR/apps importable without an "apps." prefix.
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'hw$ull9#yd)%((n32%_jx_cy+!kcr@u8-ywc_r4pg6kjmzx(f6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # First-party project apps.
    'users',
    'organization',
    'operation',
    'courses',
]
# Override the default auth user model so users.UserProfile takes effect.
AUTH_USER_MODEL = "users.UserProfile"
# Default Django middleware stack (order matters).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mxonline.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory. Django >= 1.10 ignores the legacy
        # TEMPLATE_DIRS setting this project sets at the bottom of the file,
        # so the path must be declared here for templates to be found.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mxonline.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): database credentials are hard-coded and committed; load them
# from the environment for anything beyond local development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mxonline3',
        'USER': 'root',
        'PASSWORD': '123456',
        'HOST': '127.0.0.1',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): TEMPLATE_DIRS is a pre-Django-1.8 setting that Django 2.0
# ignores entirely; this path should live in TEMPLATES[0]['DIRS'] instead.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
|
[
"[email protected]"
] | |
a4b4a0a3244cac402cda7f3b4ed5278efc2fa651
|
c4b47ba53d40e861571c82f8a968a989974dc433
|
/fireball/blobs/admin.py
|
454a72b4217a2e674b995a6f5a635ca10bde368e
|
[] |
no_license
|
underlost/fireball
|
4be3e441a82f6a0fbb603b33be8493f03019392e
|
3cf312fa88860e9f2e9f34479b5b1962dae09f55
|
refs/heads/master
| 2016-09-01T18:45:18.059628 | 2013-06-03T16:26:12 | 2013-06-03T16:26:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
py
|
from django.contrib import admin
from fireball.blobs.models import Blob
class BlobAdmin(admin.ModelAdmin):
    """Admin configuration for Blob entries."""

    # Columns shown in the change list, sidebar filters, and search targets.
    list_display = ('user', 'url',)
    list_filter = ('user',)
    search_fields = ['description', 'url', ]


admin.site.register(Blob, BlobAdmin)
|
[
"[email protected]"
] | |
596411c05f2c94b4b357beb48a6cac370bb39083
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/seamless_cube_map.py
|
25e63a7d3f689d0ff11cc0c81f81b889b4c44394
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,689 |
py
|
'''OpenGL extension ARB.seamless_cube_map
This module customises the behaviour of the
OpenGL.raw.GL.ARB.seamless_cube_map to provide a more
Python-friendly API
Overview (from the spec)
When sampling from cube map textures, a three-dimensional texture
coordinate is used to select one of the cube map faces and generate
a two dimensional texture coordinate ( s t ), at which a texel is
sampled from the determined face of the cube map texture. Each face
of the texture is treated as an independent two-dimensional texture,
and the generated ( s t ) coordinate is subjected to the same
clamping and wrapping rules as for any other two dimensional texture
fetch.
Although it is unlikely that the generated ( s t ) coordinate lies
significantly outside the determined cube map face, it is often the
case that the locations of the individual elements required during a
linear sampling do not lie within the determined face, and their
coordinates will therefore be modified by the selected clamping and
wrapping rules. This often has the effect of producing seams or
other discontinuities in the sampled texture.
This extension allows implementations to take samples from adjacent
cube map faces, providing the ability to create seamless cube maps.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/seamless_cube_map.txt
'''
from OpenGL.raw.GL.ARB.seamless_cube_map import _EXTENSION_NAME
def glInitSeamlessCubeMapARB():
    '''Return boolean indicating whether this extension is available'''
    # Imported lazily so importing this module never requires a live GL context.
    from OpenGL import extensions
    # _EXTENSION_NAME is provided by the raw binding module imported above.
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
[
"[email protected]"
] | |
fcd0b3996dcc8bf3891d3ed563e44c660b62677b
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/D/dmsilv/facebook_fans.py
|
3fc1f0e56bfce614a8af5c9b37936e98b95a0c94
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,020 |
py
|
# Blank Python
import scraperwiki
from BeautifulSoup import BeautifulSoup
#define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['Page Name', 'Fans'])
#scrape the fan section
def scrape_fans(soup):
data_table = soup.find("table",{ "class" : "uiGrid"}) #find the pages with most fans section
rows= data_table.findAll("tr") #find all the table rows
for row in rows: #loop through the rows
cells = row.findAll("td") #find all the cells
for cell in cells: #loop through the cells
#setup the data record
record={}
print cell
#table_cells=cell.findAll("p") #find all the p items
if table_cells: #if the item exists store it
record['Page Name'] = table_cells[0].text
record['Fans'] = table_cells[1].text[:-5]
scraperwiki.datastore.save(["Page Name"], record)
def scrape_page(url):
    """Fetch the directory index at *url*, follow each alphabet link and
    run scrape_fans on every linked page."""
    html = scraperwiki.scrape(url)
    soup = BeautifulSoup(html)
    #print soup.prettify()
    # the alphabet navigation block holds one link per letter
    link_table=soup.find("div", {"class" : "alphabet_list clearfix"})
    #next_link=soup.findAll("a")
    for link in link_table:
        # NOTE(review): assumes every child of the div is a tag with an
        # href attribute; text nodes would raise here -- confirm.
        next_url=link['href']
        #print next_url
        html1 = scraperwiki.scrape(next_url)
        soup1 = BeautifulSoup(html1)
        scrape_fans(soup1)
#setup the base url
base_url = 'http://facebook.com/directory/pages/'
#setup the startup url
#call the scraping function
scrape_page(base_url)
# Blank Python
# NOTE(review): everything from here down is a verbatim duplicate of the
# script above; these definitions shadow the earlier ones at import time.
import scraperwiki
from BeautifulSoup import BeautifulSoup
#define the order our columns are displayed in the datastore
scraperwiki.metadata.save('data_columns', ['Page Name', 'Fans'])
#scrape the fan section
def scrape_fans(soup):
    data_table = soup.find("table",{ "class" : "uiGrid"}) #find the pages with most fans section
    rows= data_table.findAll("tr") #find all the table rows
    for row in rows: #loop through the rows
        cells = row.findAll("td") #find all the cells
        for cell in cells: #loop through the cells
            #setup the data record
            record={}
            print cell
            #table_cells=cell.findAll("p") #find all the p items
            # NOTE(review): table_cells is never assigned (the line above is
            # commented out), so the branch below raises NameError.
            if table_cells: #if the item exists store it
                record['Page Name'] = table_cells[0].text
                record['Fans'] = table_cells[1].text[:-5]
                scraperwiki.datastore.save(["Page Name"], record)
def scrape_page(url):
    html = scraperwiki.scrape(url)
    soup = BeautifulSoup(html)
    #print soup.prettify()
    link_table=soup.find("div", {"class" : "alphabet_list clearfix"})
    #next_link=soup.findAll("a")
    for link in link_table:
        next_url=link['href']
        #print next_url
        html1 = scraperwiki.scrape(next_url)
        soup1 = BeautifulSoup(html1)
        scrape_fans(soup1)
#setup the base url
base_url = 'http://facebook.com/directory/pages/'
#setup the startup url
#call the scraping function
scrape_page(base_url)
|
[
"[email protected]"
] | |
7b47c1b415e3ad729bdce1cdb26e32be6031bda6
|
ef66e297a49d04098d98a711ca3fda7b8a9a657c
|
/snippets/ziroom/detail.py
|
32e2f1c346b125a3b9d7882a7320a3f98a252f9a
|
[] |
no_license
|
breezy1812/MyCodes
|
34940357954dad35ddcf39aa6c9bc9e5cd1748eb
|
9e3d117d17025b3b587c5a80638cb8b3de754195
|
refs/heads/master
| 2020-07-19T13:36:05.270908 | 2018-12-15T08:54:30 | 2018-12-15T08:54:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 850 |
py
|
import os
from datetime import datetime
from time import sleep
from random import choice
import requests
from agents import AGENTS
# Poll one Ziroom listing until its status leaves 'tzpzz', then ring an
# audible alarm (macOS afplay) so the user notices.
url = 'http://www.ziroom.com/detail/info'
params = {
    'id': '61155405',
    'house_id': '60185997',
}
headers = {
    # pick a random user agent once per run
    'User-Agent': choice(AGENTS),
}
while True:
    resp = requests.get(url, params=params, headers=headers)
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if resp.status_code != 200:
        print(now, 'Failed')
        sleep(5)
        continue
    try:
        data = resp.json()['data']
        status = data['status']
        price = data['price']
        print(now, status, price)
        if status != 'tzpzz':
            break
    except Exception as exc:
        # BUG FIX: the original printed ``data``, which is unbound when
        # resp.json() itself fails before the first successful iteration
        # (NameError), and stale otherwise.  Print the actual error.
        print(now, exc)
        sleep(10)
# status changed: loop the system Ping sound forever as an alert (macOS).
cmd = os.system('zsh -c "while true;do;afplay /System/Library/Sounds/Ping.aiff -v 30;done"')
|
[
"[email protected]"
] | |
f588bf0d916ba7a047741568bb2946f4fd4c309d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/53/usersdata/89/22195/submittedfiles/matriz2.py
|
cd1384dfef6761b0fbf48eaf1aa1f3eaef0a4bc4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,491 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def somaDiagonalPrincipal(a):
    """Sum the main-diagonal entries a[i, i] of the square matrix *a*."""
    total = 0
    for idx in range(a.shape[0]):
        total += a[idx, idx]
    return total
def somaDiagonalSecundaria(a):
    """Sum the anti-diagonal entries a[i, n-1-i] of the square matrix *a*."""
    n = a.shape[0]
    total = 0
    for idx in range(n):
        total += a[idx, n - 1 - idx]
    return total
def somaLinhas(a):
    """Return a list holding the sum of each row of matrix *a*."""
    rows, cols = a.shape[0], a.shape[1]
    return [sum(a[r, c] for c in range(cols)) for r in range(rows)]
def somaColunas(a):
    """Return a list holding the sum of each column of matrix *a*."""
    rows, cols = a.shape[0], a.shape[1]
    return [sum(a[r, c] for r in range(rows)) for c in range(cols)]
def quadradoMagico(a):
    """Return True when *a* is a magic square: both diagonals, every row
    and every column share the same sum; False otherwise."""
    diag_main = somaDiagonalPrincipal(a)
    diag_anti = somaDiagonalSecundaria(a)
    row_sums = somaLinhas(a)
    col_sums = somaColunas(a)
    for row_sum, col_sum in zip(row_sums, col_sums):
        if not (diag_main == diag_anti == row_sum == col_sum):
            return False
    return True
# main program
# NOTE(review): written for Python 2 -- there input() evaluates the typed
# text to an int; on Python 3 input() returns str and np.zeros((n,n))
# would raise TypeError.
n=input('digite o numero de linhas da matriz:')
#n=input('digite o numero de colunas da matriz:')
matriz=np.zeros((n,n))
# read the square matrix element by element, row-major
for i in range(0,matriz.shape[0],1):
    for j in range(0,matriz.shape[1],1):
        matriz[i,j]=input('digite um elemento da matriz:')
# print 'S' (yes) when the matrix is a magic square, otherwise 'N'
if quadradoMagico(matriz):
    print('S')
else:
    print('N')
|
[
"[email protected]"
] | |
b0e675a66588e8634b1b1524e860f5399ed48426
|
051910d10f4597cd1148207b1f5f2030c01d7ddf
|
/py/src/consts.py
|
e63df96ef48d5d3fb483637f645611bd156ae172
|
[
"MIT"
] |
permissive
|
LaplaceKorea/rosettaboy
|
831cd285e1a305690f2ee76861ccff91d77fa4a7
|
fb238cb8b73eb7903ce8b9b298896c549e75fccb
|
refs/heads/master
| 2023-08-15T15:41:36.459790 | 2021-10-12T12:13:55 | 2021-10-12T12:13:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,381 |
py
|
class Mem:
    """Named memory-map addresses used by the emulator.

    NOTE(review): the layout (interrupt vectors at 0x40.., VRAM at
    0x8000, OAM at 0xFE00, I/O registers from 0xFF00, and the LY/vblank
    comment below) matches the Game Boy memory map -- confirm against
    the hardware documentation before relying on individual labels.
    """
    # interrupt handler entry points
    VBLANK_HANDLER = 0x40
    LCD_HANDLER = 0x48
    TIMER_HANDLER = 0x50
    SERIAL_HANDLER = 0x58
    JOYPAD_HANDLER = 0x60
    # video memory layout (note: table 1 and map/window bases overlap)
    VRAM_BASE = 0x8000
    TILE_DATA_TABLE_0 = 0x8800
    TILE_DATA_TABLE_1 = 0x8000
    BACKGROUND_MAP_0 = 0x9800
    BACKGROUND_MAP_1 = 0x9C00
    WINDOW_MAP_0 = 0x9800
    WINDOW_MAP_1 = 0x9C00
    OAM_BASE = 0xFE00
    # joypad / serial / timer / interrupt-flag registers
    JOYP = 0xFF00
    SB = 0xFF01 # Serial Data
    SC = 0xFF02 # Serial Control
    DIV = 0xFF04
    TIMA = 0xFF05
    TMA = 0xFF06
    TAC = 0xFF07
    IF = 0xFF0F
    # sound registers NR10..NR52
    NR10 = 0xFF10
    NR11 = 0xFF11
    NR12 = 0xFF12
    NR13 = 0xFF13
    NR14 = 0xFF14
    NR20 = 0xFF15
    NR21 = 0xFF16
    NR22 = 0xFF17
    NR23 = 0xFF18
    NR24 = 0xFF19
    NR30 = 0xFF1A
    NR31 = 0xFF1B
    NR32 = 0xFF1C
    NR33 = 0xFF1D
    NR34 = 0xFF1E
    NR40 = 0xFF1F
    NR41 = 0xFF20
    NR42 = 0xFF21
    NR43 = 0xFF22
    NR44 = 0xFF23
    NR50 = 0xFF24
    NR51 = 0xFF25
    NR52 = 0xFF26
    # LCD / graphics registers
    LCDC = 0xFF40
    STAT = 0xFF41
    SCY = 0xFF42 # SCROLL_Y
    SCX = 0xFF43 # SCROLL_X
    LY = 0xFF44 # LY aka currently drawn line 0-153 >144 = vblank
    LCY = 0xFF45
    DMA = 0xFF46
    BGP = 0xFF47
    OBP0 = 0xFF48
    OBP1 = 0xFF49
    WY = 0xFF4A
    WX = 0xFF4B
    BOOT = 0xFF50
    IE = 0xFFFF
class Interrupt:
    """Bit flags for the interrupt request/enable registers (IF / IE)."""
    VBLANK = 0x01
    STAT = 0x02
    TIMER = 0x04
    SERIAL = 0x08
    JOYPAD = 0x10
|
[
"[email protected]"
] | |
a7a7d34db8b105e67c352abaceb211f4a7e084c3
|
a97f789530412fc1cb83170a11811f294b139ee8
|
/疯狂Python讲义/codes/12/12.6/writebytes_test.py
|
36e61f54a785fdf550bc07aebcbd19870b13ad75
|
[] |
no_license
|
baidongbin/python
|
3cebf2cc342a15b38bf20c23f941e6887dac187a
|
1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f
|
refs/heads/master
| 2021-07-21T19:23:32.860444 | 2020-03-07T11:55:30 | 2020-03-07T11:55:30 | 195,909,272 | 0 | 1 | null | 2020-07-21T00:51:24 | 2019-07-09T01:24:31 |
Python
|
UTF-8
|
Python
| false | false | 413 |
py
|
import os
# Demo of writing encoded text to a binary-mode file.
# os.linesep is the newline sequence of the current operating system; the
# file is opened in binary mode, so each line is encoded explicitly and the
# separator appended by hand.
# BUG FIX: the original never closed the file handle; a with-block
# guarantees the buffer is flushed and the handle released.
with open('y.txt', 'wb+') as f:
    f.write(('我爱Python' + os.linesep).encode('utf-8'))
    f.writelines((('土门壁甚坚,' + os.linesep).encode('utf-8'),
                  ('杏园度亦难。' + os.linesep).encode('utf-8'),
                  ('势异邺城下,' + os.linesep).encode('utf-8'),
                  ('纵死时犹宽。' + os.linesep).encode('utf-8')))
|
[
"[email protected]"
] | |
1f31c768f9d581da1b7ad412a23ad6d4e24ce3e6
|
ede5e159641ba71a1a25a50a1328c11175459cce
|
/day-3/generate.py
|
d2f956f398b09d0fff8c03fbfb6e5c34d5a4d14d
|
[
"BSD-3-Clause"
] |
permissive
|
sbu-python-summer/python-tutorial
|
1cf23c700b7b2588680ad78a06a3582dfcce873b
|
c93ac1d75188d762df7d17df7045af39dbc1bee8
|
refs/heads/master
| 2021-01-20T18:08:21.393092 | 2017-06-16T18:50:06 | 2017-06-16T18:50:06 | 90,908,339 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
# generate data for a histogram example: one "index value" pair per line
import numpy as np
N = 100
# N samples drawn from a normal distribution with standard deviation 10
samples = 10 * np.random.randn(N)
for idx, value in enumerate(samples):
    print("{} {}".format(idx, value))
|
[
"[email protected]"
] | |
7848ffce0b6988c96aae1efeb96a21b0d8bb93f4
|
c762ab8c1c25ffa97229a62ff43a33543093f963
|
/venv/bin/easy_install
|
06e9fd8505708c760e9975bfea10710ea86251e7
|
[
"Apache-2.0"
] |
permissive
|
nahyunkwon/multi-ttach
|
e68948d66541e85b764216efc54a82f6fc9ac044
|
971d0d93cc39f295deb23ea71146647f6db50ebc
|
refs/heads/master
| 2023-08-09T18:25:31.658950 | 2023-07-24T17:46:04 | 2023-07-24T17:46:04 | 297,783,964 | 0 | 1 |
Apache-2.0
| 2021-04-07T07:46:24 | 2020-09-22T22:08:53 |
G-code
|
UTF-8
|
Python
| false | false | 458 |
#!/Users/kwon/PycharmProjects/digital_fabrication_studio/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper -- regenerate, don't edit.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # strip the "-script.py(w)"/".exe" suffix Windows launchers add to argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # resolve the real easy_install entry point and exit with its result
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
|
[
"[email protected]"
] | ||
ad94567b4862afe2ff1de2f46b5864cbb56a525b
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_162711.80-000950.8/sdB_SDSSJ_162711.80-000950.8_lc.py
|
29d244111b17ca32fa317154ab63ad362a67cc25
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 371 |
py
|
from gPhoton.gAperture import gAperture
def main():
    """Run gAperture NUV photometry for sdB SDSSJ 162711.80-000950.8,
    writing the light curve to a CSV.

    NOTE(review): the csvfile path contains a space before
    "/sdB_SDSSJ_..." -- likely a typo in the generated path; confirm the
    intended output location.
    """
    gAperture(band="NUV", skypos=[246.799167,0.164111], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ_162711.80-000950.8 /sdB_SDSSJ_162711.80-000950.8_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
67cc8ae0e9b2a05fb85c2ca88cb58905348d3cf1
|
141545126466a00f32247dfa40e067ec049b0fa4
|
/Programming Basics Python/Exam Problems 20042019/Easter Shop.py
|
24f0f9d5278dd658f8d045e6e6d0eab1bb4ce257
|
[] |
no_license
|
RadkaValkova/SoftUni-Web-Developer
|
83314367172a18f001e182b4e57f7ca0502ad1fc
|
61d3414373498bb6009ae70e8d17f26cd2d88ea5
|
refs/heads/main
| 2023-06-01T02:11:06.606370 | 2021-06-29T19:39:19 | 2021-06-29T19:39:19 | 325,611,606 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 629 |
py
|
# Egg-shop simulation: the first line is the starting stock; then commands
# follow -- "Close" ends the day, "Buy" sells the next number of eggs,
# anything else restocks by that number.
available = int(input())
total_sold = 0
total_filled = 0
while True:
    action = input()
    if action == 'Close':
        print('Store is closed!')
        print(f'{total_sold} eggs sold.')
        break
    amount = int(input())
    if action != 'Buy':
        # any non-Buy command restocks the store
        total_filled += amount
        available += amount
        continue
    if available < amount:
        # not enough stock: report and stop immediately
        print('Not enough eggs in store!')
        print(f'You can buy only {available}.')
        break
    total_sold += amount
    available -= amount
|
[
"[email protected]"
] | |
cac5a5118c1caa62049a0352cf2b96d18328b009
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/Django/rbac/day69/luffy_permission/rbac/middlewares/rbac.py
|
05e05715e769339255dc4cd45e4b8f209919f427
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,122 |
py
|
from django.utils.deprecation import MiddlewareMixin
from django.shortcuts import HttpResponse, redirect, reverse
from django.conf import settings
import re
class RbacMiddleWare(MiddlewareMixin):
def process_request(self, request):
# 获取当前访问的页面
url = request.path_info # index
# 白名单
for i in settings.WHITE_LIST:
if re.match(i, url):
return
# 获取登录状态
is_login = request.session.get('is_login')
# 没有登录跳转到登录页面
if not is_login:
return redirect(reverse('login'))
# 免认证
for i in settings.NO_PERMISSION_LIST:
if re.match(i, url):
return
# 获取当前用户的权限
permission_list = request.session['permission']
print(permission_list)
# 权限的校验
for i in permission_list:
if re.match('^{}$'.format(i['permissions__url']), url):
return
# 没匹配成功 没有权限
return HttpResponse('没有访问的权限')
|
[
"[email protected]"
] | |
2cd69de0ed6caab0657b020ebf567283aa2001c4
|
52a32a93942b7923b7c0c6ca5a4d5930bbba384b
|
/dojo/db_migrations/0051_regulation_categories.py
|
2fe6ff5d352fd9d2bde9377ad6d5aaf8ae2c4bdb
|
[
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
DefectDojo/django-DefectDojo
|
43bfb1c728451335661dadc741be732a50cd2a12
|
b98093dcb966ffe972f8719337de2209bf3989ec
|
refs/heads/master
| 2023-08-21T13:42:07.238370 | 2023-08-14T18:00:34 | 2023-08-14T18:00:34 | 31,028,375 | 2,719 | 1,666 |
BSD-3-Clause
| 2023-09-14T19:46:49 | 2015-02-19T17:53:47 |
HTML
|
UTF-8
|
Python
| false | false | 810 |
py
|
# Generated by Django 2.2.15 on 2020-08-30 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: constrains Regulation.category to
    a fixed choice set and makes Regulation.name unique."""
    dependencies = [
        ('dojo', '0050_deduplication_on_engagement'),
    ]
    operations = [
        migrations.AlterField(
            model_name='regulation',
            name='category',
            field=models.CharField(choices=[('privacy', 'Privacy'), ('finance', 'Finance'), ('education', 'Education'), ('medical', 'Medical'), ('corporate', 'Corporate'), ('other', 'Other')], help_text='The subject of the regulation.', max_length=9),
        ),
        migrations.AlterField(
            model_name='regulation',
            name='name',
            field=models.CharField(help_text='The name of the regulation.', max_length=128, unique=True),
        ),
    ]
|
[
"[email protected]"
] | |
ca40498aa30d2a1e5baee83adfbb27bbb25c2bfa
|
f18125b848e37a64e35136a90cf4694e52eb9fcc
|
/teek/_timeouts.py
|
13563144be59e5b03aa5f93e5c8b5b14d4b72708
|
[
"MIT"
] |
permissive
|
carlbordum/teek
|
d19271dfe11e3e77052b1a3c215ddf6a9d50e440
|
a931b468744c8236fd4ce6f1dc3a8c4829d59db3
|
refs/heads/master
| 2020-04-16T11:41:10.909230 | 2019-01-13T19:24:16 | 2019-01-13T19:24:16 | 165,547,247 | 0 | 0 | null | 2019-01-13T19:48:26 | 2019-01-13T19:48:26 | null |
UTF-8
|
Python
| false | false | 2,489 |
py
|
import teek as tk
from teek._tcl_calls import make_thread_safe
# there's no after_info because i don't see how it would be useful in
# teek
class _Timeout:
    """A single scheduled Tcl ``after`` callback.

    Wraps the callback in a registered Tcl command and remembers the
    ``after`` id so it can be cancelled; cleans the Tcl command up after
    the callback runs or is cancelled.
    """
    def __init__(self, after_what, callback, args, kwargs):
        # after_what: milliseconds, or the string 'idle' (see after_idle)
        if kwargs is None:
            kwargs = {}
        self._callback = callback
        self._args = args
        self._kwargs = kwargs
        self._state = 'pending' # just for __repr__ and error messages
        self._tcl_command = tk.create_command(self._run)
        self._id = tk.tcl_call(str, 'after', after_what, self._tcl_command)
    def __repr__(self):
        name = getattr(self._callback, '__name__', self._callback)
        return '<%s %r timeout %r>' % (self._state, name, self._id)
    def _run(self):
        """Invoked from Tcl when the timeout fires; runs the callback and
        deletes the Tcl command unless the app is quitting."""
        needs_cleanup = True
        # this is important, thread tests freeze without this special
        # case for some reason
        def quit_callback():
            nonlocal needs_cleanup
            needs_cleanup = False
        tk.before_quit.connect(quit_callback)
        try:
            self._callback(*self._args, **self._kwargs)
            self._state = 'successfully completed'
        except Exception as e:
            self._state = 'failed'
            raise e
        finally:
            tk.before_quit.disconnect(quit_callback)
            if needs_cleanup:
                tk.delete_command(self._tcl_command)
    @make_thread_safe
    def cancel(self):
        """Prevent this timeout from running as scheduled.
        :exc:`RuntimeError` is raised if the timeout has already ran or
        it has been cancelled.
        """
        if self._state != 'pending':
            raise RuntimeError("cannot cancel a %s timeout" % self._state)
        tk.tcl_call(None, 'after', 'cancel', self._id)
        self._state = 'cancelled'
        tk.delete_command(self._tcl_command)
@make_thread_safe
def after(ms, callback, args=(), kwargs=None):
    """Schedule ``callback(*args, **kwargs)`` to run once *ms* milliseconds
    have elapsed.

    *kwargs* defaults to an empty dict.  The returned timeout object has a
    no-argument ``cancel()`` method that prevents the callback from
    running if called before the timeout fires.
    """
    timeout = _Timeout(ms, callback, args, kwargs)
    return timeout
@make_thread_safe
def after_idle(callback, args=(), kwargs=None):
    """Schedule the callback to run as soon as the event loop is idle;
    otherwise identical to :func:`after`."""
    idle_timeout = _Timeout('idle', callback, args, kwargs)
    return idle_timeout
|
[
"[email protected]"
] | |
3c2f70f7d8cb1b239b139682b5e5bfd6579d4cb8
|
02c4c52f2f630df77bc25994aee13a7530e3136d
|
/012016/python/xmlParsing.py
|
ec2f58145a53178b29ba7dc467dc309ce4c9fc3c
|
[] |
no_license
|
abgoswam/2016
|
ac7d61828bc5fda8c003f135bc250ed57d03ae4c
|
307844ba74bf302c290a4e2b4792a096e0c3cb56
|
refs/heads/master
| 2020-04-15T14:03:49.674650 | 2016-12-09T02:19:44 | 2016-12-09T02:19:44 | 57,982,844 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,555 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 07:38:36 2016
@author: agoswami
Code from : http://www.blog.pythonlibrary.org/2013/04/30/python-101-intro-to-xml-parsing-with-elementtree/
"""
import time
import xml.etree.ElementTree as xml
import xml.etree.cElementTree as ET
import xml.dom.minidom as minidom
#http://stackoverflow.com/questions/17402323/use-xml-etree-elementtree-to-write-out-nicely-formatted-xml-files
def prettify(elem):
    """Return a pretty-printed, tab-indented XML string for *elem*.

    The element is serialized to a rough byte string, re-parsed with
    minidom, and rendered through ``toprettyxml``.
    """
    serialized = xml.tostring(elem, 'utf-8')
    dom = minidom.parseString(serialized)
    return dom.toprettyxml(indent="\t")
def createXML(filename):
    """Build a small zAppointments document with one appointment and
    write it to *filename*.

    NOTE(review): written for Python 2 -- on Python 3, tree.write() to a
    text-mode handle would fail because it emits bytes by default.
    """
    root = xml.Element("zAppointments")
    appt = xml.Element("appointment")
    root.append(appt)
    # adding appt children
    begin = xml.SubElement(appt, "begin")
    begin.text = "12345678"
    uid = xml.SubElement(appt, "uid")
    uid.text = "040000008200E000"
    alarmTime = xml.SubElement(appt, "alarmTime")
    alarmTime.text = "1181572063"
    # these three are intentionally left as empty elements
    state = xml.SubElement(appt, "state")
    location = xml.SubElement(appt, "location")
    duration = xml.SubElement(appt, "duration")
    duration.text = "1800"
    subject = xml.SubElement(appt, "subject")
    tree = xml.ElementTree(root)
    with open(filename, "w") as fh:
        tree.write(fh)
def editXML(filename, updatedfilename):
    """Rewrite every <begin> timestamp in *filename* as human-readable
    ctime text and save a pretty-printed copy to *updatedfilename*."""
    tree = ET.ElementTree(file=filename)
    root = tree.getroot()
    # each <begin> holds seconds-since-epoch as text; replace with ctime
    for begin_time in root.iter("begin"):
        begin_time.text = time.ctime(int(begin_time.text))
    s = prettify(root)
    print s
#    tree = ET.ElementTree(root)
    with open(updatedfilename, "w") as f:
#        tree.write(f)
        f.write(s)
def parseXML(xml_file):
    """
    Parse XML with ElementTree

    Demonstrates three traversal styles: direct child iteration, a whole
    tree iterator, and getchildren().  NOTE(review): getiterator() and
    getchildren() are deprecated/removed in modern ElementTree.
    """
    tree = ET.ElementTree(file=xml_file)
    print tree.getroot()
    root = tree.getroot()
    print "tag=%s, attrib=%s" % (root.tag, root.attrib)
    for child in root:
        print child.tag, child.attrib
        if child.tag == "appointment":
            for step_child in child:
                print step_child.tag
    # iterate over the entire tree
    print "-" * 40
    print "Iterating using a tree iterator"
    print "-" * 40
    iter_ = tree.getiterator()
    for elem in iter_:
        print elem.tag
    # get the information via the children!
    print "-" * 40
    print "Iterating using getchildren()"
    print "-" * 40
    appointments = root.getchildren()
    for appointment in appointments:
        appt_children = appointment.getchildren()
        for appt_child in appt_children:
            print "%s=%s" % (appt_child.tag, appt_child.text)
#----------------------------------------------------------------------
if __name__ == "__main__":
    # create a sample document, rewrite its timestamps, then re-parse it
    filename = "appt.xml"
    updatedfilename = "updated.xml"
    createXML(filename)
## just playing around with how to read / write text to files in python
#    f = open(filename, "ab")
#    f.writelines("abhishek\n")
#    f.writelines("goswami\n")
#    f.writelines("microsoft\n")
#    f.close()
#
#    with open(filename, "rb") as fh:
#        s = fh.read()
#        print "++ line:{0}".format(s)
#        for line in fh:
#            print "-- line:{0}".format(line)
    editXML(filename, updatedfilename)
    parseXML(updatedfilename)
|
[
"[email protected]"
] | |
17f286bd52e1fda213acc5e1347e4d32bd730c24
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/389/usersdata/346/73669/submittedfiles/poligono.py
|
2d92908686c389fe16d305bc44eb0541a9bdb9f1
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 156 |
py
|
# -*- coding: utf-8 -*-
# ENTRADA: number of sides of the polygon
# BUG FIX: the original line was missing the closing parenthesis of
# int(...), which made the whole script a SyntaxError.
n = int(input('Digite quantos lados deve ter o seu polígono: '))
# PROCESSAMENTO: a convex polygon with n sides has n*(n-3)/2 diagonals
nd = (n*(n-3))/2
# SAÍDA: print with one decimal place
print('%.1f' % nd)
|
[
"[email protected]"
] | |
a23a188a9a220ca25ffaec14f74dd0d311ecb76a
|
ba3be84d355e90860479fc2a0d92b536c377c643
|
/PYTHON/Advance/Python Controls/py_Frame2.py
|
d9e598e1135bffa05661b601196ac36c71774648
|
[] |
no_license
|
vipin26/python
|
c62a3427511658ff292085fc382c5b3b3ff4d609
|
e8442b84c385ddef972d6a514e471d8eba8af0a3
|
refs/heads/master
| 2020-09-29T12:39:52.775466 | 2019-12-15T05:44:31 | 2019-12-15T05:44:31 | 227,034,374 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
# Demo: two side-by-side frames that stretch with the window, one button each.
# NOTE(review): Python 2 module name (Tkinter); on Python 3 this is tkinter.
from Tkinter import *
top = Tk()
top.geometry("500x500")
#width="200", height="200"
# left frame expands to fill available space
f1 = Frame(top,bg="green")
f1.pack(side="left",fill=BOTH,expand=1)
# right frame gets an initial size but also expands
f2 = Frame(top,bg="Red", width="200", height="200")
f2.pack(side="right",fill=BOTH,expand=1)
b1= Button(f1,text="Button 1");
b1.pack()
b2= Button(f2,text="Button 2");
b2.pack()
# enter the Tk event loop (blocks until the window is closed)
top.mainloop()
|
[
"[email protected]"
] | |
2749a89da17d91be60716423a5b52e513374404b
|
7e325da0ec25a56158f4a47acf6f594548a72384
|
/users/migrations/0005_profile_stripe_customer_id.py
|
97a12f3ccd87c340090928f9137cccccc4257501
|
[
"MIT"
] |
permissive
|
drewvpham/xclude.com
|
5102a921721c508552648ee03f5a8e1b0bafb6e8
|
103e89e2326c4c6fbfab819c43bc4e4634913bc9
|
refs/heads/master
| 2022-12-16T06:36:00.631404 | 2019-12-29T20:35:16 | 2019-12-29T20:35:16 | 222,317,889 | 0 | 0 |
MIT
| 2022-12-07T23:54:06 | 2019-11-17T21:58:47 |
Python
|
UTF-8
|
Python
| false | false | 428 |
py
|
# Generated by Django 2.2.7 on 2019-12-24 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the optional
    stripe_customer_id field to the user profile."""
    dependencies = [
        ('users', '0004_profile_one_click_purchasing'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='stripe_customer_id',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
0e928393f712c9aa0b389fc8e611da0166635eb7
|
a3d6eb92c6fc6ed6095d10bc7b329cb8b4a8b166
|
/src/config.py
|
9f147f5f9b4dfea32202702abb0161c77899e708
|
[] |
no_license
|
hgiesel/anki_straight_reward
|
ab29e6f154beba7c5f2f9bd9579c21cdd942b218
|
3fe72255f667d2eb544afb2541a1eb974c23eede
|
refs/heads/master
| 2022-12-23T22:38:28.432113 | 2022-12-12T13:46:36 | 2022-12-12T13:46:36 | 245,190,627 | 19 | 4 | null | 2023-08-28T16:01:55 | 2020-03-05T14:53:08 |
Python
|
UTF-8
|
Python
| false | false | 2,381 |
py
|
from anki.cards import Card
from aqt import mw
from .types import StraightSetting
# Fallback settings used when a deck config has no "straightReward" entry;
# positional order matches deserialize_setting below: straight_length=2,
# enable_notifications=True, base_ease=5, step_ease=5, start_ease=130,
# stop_ease=250.
DEFAULT_SETTINGS = StraightSetting(2, True, 5, 5, 130, 250)
# Key under which this add-on stores its settings inside a deck config dict.
KEYWORD = "straightReward"
def serialize_setting(setting: StraightSetting) -> dict:
    """Convert a StraightSetting into the camelCase dict stored in the
    deck configuration (inverse of deserialize_setting_from_dict)."""
    result = {}
    result["enableNotifications"] = setting.enable_notifications
    result["straightLength"] = setting.straight_length
    result["baseEase"] = setting.base_ease
    result["stepEase"] = setting.step_ease
    result["startEase"] = setting.start_ease
    result["stopEase"] = setting.stop_ease
    return result
def deserialize_setting(
    straight_length: int,
    enable_notifications: bool,
    base_ease: int,
    step_ease: int,
    start_ease: int,
    stop_ease: int,
) -> StraightSetting:
    """Build a StraightSetting from its individual field values (passed
    positionally in the constructor's order)."""
    setting = StraightSetting(
        straight_length, enable_notifications, base_ease,
        step_ease, start_ease, stop_ease,
    )
    return setting
def deserialize_setting_from_dict(setting_data: dict) -> StraightSetting:
    """Build a StraightSetting from a settings dict, filling every missing
    key with the corresponding DEFAULT_SETTINGS field.

    setting_data -- dict shaped like the output of serialize_setting.
    """
    # dict.get with a default replaces the repetitive
    # ``d["k"] if "k" in d else default`` chains of the original;
    # behavior is identical (membership check, no None special-casing).
    return StraightSetting(
        setting_data.get("straightLength", DEFAULT_SETTINGS.straight_length),
        setting_data.get("enableNotifications", DEFAULT_SETTINGS.enable_notifications),
        setting_data.get("baseEase", DEFAULT_SETTINGS.base_ease),
        setting_data.get("stepEase", DEFAULT_SETTINGS.step_ease),
        setting_data.get("startEase", DEFAULT_SETTINGS.start_ease),
        setting_data.get("stopEase", DEFAULT_SETTINGS.stop_ease),
    )
def get_setting_from_config(config) -> StraightSetting:
    """Read this add-on's settings out of a deck *config* dict, falling
    back to the defaults when the entry is missing or malformed."""
    try:
        return deserialize_setting_from_dict(config[KEYWORD])
    except Exception:
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; ``except Exception`` keeps the deliberate
        # best-effort fallback while letting those propagate.
        return get_default_setting()
def get_setting_from_card(card: Card) -> StraightSetting:
    """Return the straight-reward settings of the deck *card* belongs to."""
    # confForDid did resort to conf for default deck if not available (TODO is this still true?)
    # card.odid takes precedence over card.did -- presumably the original
    # deck id for cards in filtered decks (0/falsy otherwise); confirm.
    config = mw.col.decks.config_dict_for_deck_id(card.odid or card.did)
    return get_setting_from_config(config)
def get_default_setting() -> StraightSetting:
    """Return the shared module-level default settings object (not a copy)."""
    return DEFAULT_SETTINGS
def write_setting(config, setting: StraightSetting):
    """Serialize *setting* into *config* under KEYWORD and persist the
    updated deck config through the Anki collection."""
    config[KEYWORD] = serialize_setting(setting)
    mw.col.decks.update_config(config)
|
[
"[email protected]"
] | |
314181b6076dd4f353ab40ca9d5695c63949d5ba
|
c0a9460591dcb5a322c1c5ec8b67e1d775f8f4ba
|
/advec_1d/dg_modal_gpu.py
|
d4b79c2b6d4cba9f5cc88e1e69a5f264c41dfdf1
|
[] |
no_license
|
wbkifun/my_research
|
aca8f5132d03de2e15adc3b0ded164fbd89e38a3
|
eb7e61f5405834dcbea240665bdc819f4b3f97bf
|
refs/heads/master
| 2020-12-24T16:40:29.722161 | 2016-03-07T00:31:15 | 2016-03-07T00:31:15 | 5,176,828 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,676 |
py
|
#===============================================================================
# DG method with modal basis functions
# 1-D advection equation
# ------------------------------------------------------------------------------
#
# Last update: 2012.4.26
# ------------------------------------------------------------------------------
#
# <Description>
# - basis function: Legendre polynomial
# - boundary condition: periodic
# - initial condition: Gaussian shape
# - numerical integration: Gauss quadrature (Gauss-lubatto rules)
# - time evolution: 4th-order Runge-Kutta
# - Legendre polynomial calculations: Numpy module (numpy.polynomial.legendre)
#
# <Variables>
# - ux solution u(x) at t in physical domain
# - ul spectral components of u(x) in Legendre polynomial space
# - fl spectral components of f(u), f=vu is used
# - v fluid velociy
# - ne # of elements
# - nne # of gaussian quadrature nodes in a element
# - nn # of total nodes
# - x4n global coordinates for each nodes
# - sle slice indices in a element
# - sles list of sle s
#
# <History>
# 2012.4.26 Class inheritance by Ki-Hwan Kim
# Reduce number of kernels (4 -> 2)
# 2012.4.25 fix dx -> de by Ki-Hwan Kim
# 2012.4.24 CUDA version by Ki-Hwan Kim
# 2012.4.14 Convert to object-oriented by Ki-Hwan Kim
# 2012.4.13 Rewriten using Python by Ki-Hwan Kim
# 2012.3.27 Matlab code by Shin-Hoo Kang
#===============================================================================
from __future__ import division
from dg_modal_base import DGModalBase
import numpy as np
import pycuda.driver as cuda
class DGModalGpu(DGModalBase):
    """CUDA-accelerated variant of the modal DG solver: mirrors the host
    arrays on the GPU and runs the RK4 update via two kernels from
    core.cu (update_pre, update_ul)."""
    def __init__(self, ne, p_degree, cfl=0.1, v=0.5, target_gpu=0):
        # create a context on the selected device; pop it at interpreter exit
        cuda.init()
        self.dev = cuda.Device(target_gpu)
        self.ctx = self.dev.make_context()
        import atexit
        atexit.register(self.ctx.pop)
        super(DGModalGpu, self).__init__(ne, p_degree, cfl, v)
    def allocation(self):
        """Allocate device buffers mirroring the host spectral arrays."""
        super(DGModalGpu, self).allocation()
        self.ul_gpu = cuda.to_device(self.ul)
        self.ul_prev_gpu = cuda.to_device(self.ul)
        self.ul_tmp_gpu = cuda.to_device(self.ul)
        self.kl_gpu = cuda.to_device(self.ul)
        self.el_sum_gpu = cuda.to_device(np.zeros(self.ne))
    def x2l(self):
        """Physical -> spectral transform on the host, then upload ul."""
        super(DGModalGpu, self).x2l()
        cuda.memcpy_htod(self.ul_gpu, self.ul)
    def l2x(self):
        """Download ul from the device, then spectral -> physical on host."""
        cuda.memcpy_dtoh(self.ul, self.ul_gpu)
        super(DGModalGpu, self).l2x()
    def prepare_update(self):
        """Compile core.cu (located next to this file) and bind kernels."""
        from pycuda.compiler import SourceModule
        import os
        src_path = '/'.join( os.path.abspath(__file__).split('/')[:-1] )
        kernels = open(src_path + '/core.cu').read()
        mod = SourceModule(kernels)
        #mod = cuda.module_from_file('core.cubin')
        self.update_pre = mod.get_function('update_pre')
        self.update_ul = mod.get_function('update_ul')
    def update(self):
        """Advance one time step with 4th-order Runge-Kutta on the GPU."""
        nn, ne, nne = np.int32([self.nn, self.ne, self.nne])
        dt, de, vf = np.float64([self.dt, self.de, self.vf])
        # 1-D launch: 256 threads per block, enough blocks to cover nn nodes
        bs, gs = (256,1,1), (self.nn//256+1,1)
        ul, ul_prev, ul_tmp = self.ul_gpu, self.ul_prev_gpu, self.ul_tmp_gpu
        kl = self.kl_gpu
        el_sum = self.el_sum_gpu
        # classic RK4 stage coefficients
        c_ul_tmps = np.float32([0, 0.5, 0.5, 1])
        c_uls = np.float32([1./6, 1./3, 1./3, 1./6])
        cuda.memcpy_dtod(ul_prev, ul, self.ul.nbytes)
        for c_ul_tmp, c_ul in zip(c_ul_tmps, c_uls):
            self.update_pre(nn, nne, vf, c_ul_tmp, ul, ul_prev, ul_tmp, kl, el_sum, block=bs, grid=gs)
            self.update_ul(nn, ne, nne, dt, de, vf, c_ul, ul, ul_tmp, kl, el_sum, block=bs, grid=gs)
|
[
"[email protected]"
] | |
2ac42e0a73d94a3ea63cdde82603cce7f4879b66
|
b697b98db859c061c1174837deee1d6fc47d115e
|
/examples/spot/futures/futures_loan_interest_history.py
|
d11b8ced29894bbc52ba7c6b24d2585c250be296
|
[
"MIT"
] |
permissive
|
leozaragoza/binance-connector-python
|
7e684d6e68ff7d580b7e3fa83f952540a79b1120
|
3311d102c9e788e3d71047f0af103c00d1ae2162
|
refs/heads/master
| 2023-07-15T12:27:50.041388 | 2021-08-22T17:08:38 | 2021-08-22T17:08:38 | 396,354,910 | 3 | 0 |
MIT
| 2021-08-22T17:08:38 | 2021-08-15T13:12:41 |
Python
|
UTF-8
|
Python
| false | false | 268 |
py
|
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
# log requests/responses at DEBUG for demonstration purposes
config_logging(logging, logging.DEBUG)
# fill in real API credentials before running; this endpoint requires them
key = ""
secret = ""
client = Client(key, secret)
# fetch and log this account's futures loan interest history
logging.info(client.futures_loan_interest_history())
|
[
"[email protected]"
] | |
92f9238ad3092c97ec4b095701f67d1c55cfd079
|
5d302c38acd02d5af4ad7c8cfe244200f8e8f877
|
/String/1408. String Matching in an Array(Easy).py
|
adf72b4a7fde98423f8ff4a18cf2a13c73a92e79
|
[] |
no_license
|
nerohuang/LeetCode
|
2d5214a2938dc06600eb1afd21686044fe5b6db0
|
f273c655f37da643a605cc5bebcda6660e702445
|
refs/heads/master
| 2023-06-05T00:08:41.312534 | 2021-06-21T01:03:40 | 2021-06-21T01:03:40 | 230,164,258 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 725 |
py
|
class Solution:
    def stringMatching(self, words: List[str]) -> List[str]:
        """Return every word that occurs as a substring of another word.

        Improvements over the original: the substring test uses the idiomatic
        ``in`` operator instead of ``str.find(...) != -1``, and the redundant
        length comparison is dropped — a longer string can never be a
        substring of a strictly shorter one, so ``in`` already covers it.
        Still the straightforward O(n^2) pairwise scan.
        """
        matches = set()
        for candidate in words:
            for container in words:
                # Only count occurrences inside a *different* word.
                if candidate != container and candidate in container:
                    matches.add(candidate)
        return list(matches)
#class Solution:
# def stringMatching(self, words: List[str]) -> List[str]:
# res = []
# words.sort(key=len)
# for i, word in enumerate(words):
# for k in range(i+1, len(words)):
# if word in words[k]:
# res.append(word)
# break
#
# return res
|
[
"[email protected]"
] | |
b24094d77418e88f7477458debc1594cdeb2b6fa
|
021ce16e42120246557dfa210bef6e96a34623b6
|
/tornado_sqlalchemy_login/sqla/models.py
|
6769f29cf020d69ba5c5480e1a770a37fb2ae347
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/tornado-sqlalchemy-login
|
f455b95d60d392202b46758ff230259785f7dc19
|
499bc7d79926b79352a3b9abdb864815e9896274
|
refs/heads/main
| 2022-12-01T22:28:06.620106 | 2022-11-22T01:46:21 | 2022-11-22T01:46:21 | 230,801,513 | 1 | 0 |
Apache-2.0
| 2023-09-04T13:35:46 | 2019-12-29T20:46:05 |
Python
|
UTF-8
|
Python
| false | false | 2,109 |
py
|
import secrets
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
TOKEN_WIDTH = 64
Base = declarative_base()
class User(Base):
    """Application account; owns zero or more APIKey rows."""
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    username = Column(String(100), nullable=False, unique=True)
    # NOTE(review): nothing in this model hashes the password before storage —
    # confirm callers hash it upstream.
    password = Column(String(100), nullable=False)
    # Backing column for the `email` hybrid property; kept private so writes go
    # through the setter (validation is still a TODO there).
    _email = Column("email", String, nullable=False, unique=True)
    apikeys = relationship("APIKey", back_populates="user")
    admin = Column(Boolean, default=False)
    @hybrid_property
    def email(self):
        """Public read access to the underlying `email` column."""
        return self._email
    @email.setter
    def email(self, email):
        # TODO validate
        self._email = email
    def __repr__(self):
        return "<User(id='{}', username='{}')>".format(self.id, self.username)
    def to_dict(self):
        """Serialize the public fields; password is deliberately excluded."""
        ret = {}
        for item in ("id", "username", "email"):
            ret[item] = getattr(self, item)
        return ret
    def from_dict(self, d):
        """Deserialization hook; intentionally unimplemented."""
        raise NotImplementedError()
class APIKey(Base):
    """Key/secret credential pair owned by a User; rows are removed when the
    owning user is deleted (ondelete cascade on the FK)."""
    __tablename__ = "apikeys"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id", ondelete="cascade"))
    user = relationship("User", back_populates="apikeys")
    # Column defaults mint a fresh URL-safe random token per row (secrets module).
    key = Column(
        String(100), nullable=False, default=lambda: secrets.token_urlsafe(TOKEN_WIDTH)
    )
    secret = Column(
        String(100), nullable=False, default=lambda: secrets.token_urlsafe(TOKEN_WIDTH)
    )
    @staticmethod
    def generateKey():
        """Return a fresh, unpersisted key/secret pair as a dict."""
        return {
            "key": secrets.token_urlsafe(TOKEN_WIDTH),
            "secret": secrets.token_urlsafe(TOKEN_WIDTH),
        }
    def __repr__(self):
        # Secret intentionally masked so reprs never leak it into logs.
        return "<Key(id='{}', key='{}', secret='***')>".format(self.id, self.key)
    def to_dict(self):
        """Serialize all fields — including the secret; callers must guard exposure."""
        ret = {}
        for item in ("id", "user_id", "key", "secret"):
            ret[item] = getattr(self, item)
        return ret
    def from_dict(self, d):
        """Deserialization hook; intentionally unimplemented."""
        raise NotImplementedError()
|
[
"[email protected]"
] | |
3e2af68956ab395d7d6c3ee1a4238c837c4b51cc
|
b2472967910be9c12576f0f97d33bca0576a8667
|
/atcoder-old/2019/0901_abc139/d.py
|
0f4f619ee2e61c90c3c79b0e44b9d3a7b51c02c3
|
[] |
no_license
|
ykmc/contest
|
85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4
|
69a73da70f7f987eb3e85da503ea6da0744544bd
|
refs/heads/master
| 2020-09-01T22:56:10.444803 | 2020-07-14T11:36:43 | 2020-07-14T11:36:43 | 217,307,953 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
# Python3 (3.4.3)
import sys
input = sys.stdin.readline
# -------------------------------------------------------------
# function
# -------------------------------------------------------------
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
N = int(input())
# Choosing i mod (i+1) = i is optimal for every position, so the answer is the
# sum of the remainders 1 .. N-1, i.e. N*(N-1)/2.
print(N*(N-1)//2)
|
[
"[email protected]"
] | |
368d3a8e796916e2e6cadcf0e634c978a7ef2699
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/martinkersner/train-CRF-RNN/crfasrnn.py
|
472a86f853e687aa057de49dcc8aa8957733f63d
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,462 |
py
|
#!/usr/bin/env python
# Martin Kersner, [email protected]
# 2016/03/03
from __future__ import print_function
caffe_root = '../caffe-crfrnn/'
import sys
sys.path.insert(0, caffe_root + 'python')
import os
import cPickle
import logging
import numpy as np
import pandas as pd
from PIL import Image as PILImage
import cStringIO as StringIO
import caffe
import matplotlib.pyplot as plt
from utils import palette_demo
# TODO concatenate input and output image
def main():
    """Run CRF-RNN segmentation over the requested images and save the labels.

    Command line: ``[ITERATION_NUM [IMAGE ...]]``. Without arguments the three
    bundled demo images and the pretrained TVG model are used; with an
    iteration number, the matching snapshot from models/ is loaded.
    """
    iteration, image_paths = process_arguments(sys.argv)
    if iteration:
        prototxt = 'TVG_CRFRNN_COCO_VOC_TEST_3_CLASSES.prototxt'
        model = 'models/train_iter_{}.caffemodel'.format(iteration)
    else:
        prototxt = 'TVG_CRFRNN_COCO_VOC.prototxt'
        model = 'TVG_CRFRNN_COCO_VOC.caffemodel'
    if not exist_model(model, prototxt):
        help()
    # default images (part of http://www.cs.berkeley.edu/~bharath2/codes/SBD/download.html)
    if not image_paths:
        image_paths.append('images/2007_005844.png') # chair
        image_paths.append('images/2008_007811.png') # bottle
        image_paths.append('images/2007_002094.png') # bird
    palette = palette_demo()
    net = caffe.Segmenter(prototxt, model, True)
    for path in image_paths:
        image, cur_h, cur_w = preprocess_image(path)
        # BUG FIX: the original `image == None` compares a numpy array
        # element-wise with None (the result has no single truth value);
        # the missing-file sentinel must be tested with identity.
        if image is None:
            print(path + ' does not exist! Skipping.' , file=sys.stderr)
            continue
        print('Processing ' + path + '...', end='')
        segmentation = net.predict([image])
        segm_post = postprocess_label(segmentation, cur_h, cur_w, palette)
        plt.imshow(segm_post)
        plt.savefig(create_label_name(path))
        print('finished.')
def preprocess_image(image_path):
    """Load an image and prepare it for the Caffe segmenter.

    Returns ``(padded_image, original_height, original_width)``, or
    ``(None, 0, 0)`` when the file does not exist.
    """
    if not os.path.exists(image_path):
        return None, 0, 0
    input_image = 255 * caffe.io.load_image(image_path)
    image = PILImage.fromarray(np.uint8(input_image))
    image = np.array(image)
    # Per-channel mean pixel subtracted below; values match the VGG/ImageNet
    # BGR means used by the pretrained TVG model.
    mean_vec = np.array([103.939, 116.779, 123.68], dtype=np.float32)
    reshaped_mean_vec = mean_vec.reshape(1, 1, 3);
    # Reverse the channel axis (RGB -> BGR) to match the Caffe model.
    im = image[:,:,::-1]
    im = im - reshaped_mean_vec
    # Pad as necessary
    # The network expects fixed 500x500 inputs; zero-pad bottom/right edges.
    cur_h, cur_w, cur_c = im.shape
    pad_h = 500 - cur_h
    pad_w = 500 - cur_w
    im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode = 'constant', constant_values = 0)
    return im, cur_h, cur_w
def postprocess_label(segmentation, cur_h, cur_w, palette):
    """Crop the padded segmentation back to the source size and colorize it."""
    cropped = segmentation[0:cur_h, 0:cur_w]
    label_image = PILImage.fromarray(cropped)
    label_image.putpalette(palette)
    return label_image
def create_label_name(orig_path):
    """Derive the output filename: 'label_<basename-without-extension>.png'."""
    base_name = os.path.basename(orig_path)
    stem = os.path.splitext(base_name)[0]
    return 'label_' + stem + '.png'
def exist_model(model, prototxt):
    """Return True iff both the caffemodel and the prototxt exist on disk.

    The first missing file is reported on stderr, mirroring the original's
    short-circuit behaviour (only one message is ever printed).
    """
    model_ok = os.path.exists(model)
    prototxt_ok = os.path.exists(prototxt)
    if not model_ok:
        print('Model ' + model + ' does not exist! Exiting.', file=sys.stderr)
    elif not prototxt_ok:
        print('Prototxt' + prototxt + ' does not exist! Exiting.', file=sys.stderr)
    return model_ok and prototxt_ok
def process_arguments(argv):
    """Split argv into (iteration_number_or_None, list_of_image_paths).

    argv[0] is the program name; argv[1] (if present) is the model iteration;
    everything after that is treated as an image path.
    """
    iteration = argv[1] if len(argv) >= 2 else None
    image_paths = list(argv[2:])
    return iteration, image_paths
def help():
    """Print usage to stderr and terminate the process.

    Note: this module-level function intentionally shadows the builtin
    ``help`` (existing callers depend on the name).
    """
    print('Usage: python crfasrnn.py [ITERATION_NUM [IMAGE, IMAGE, ...]\n'
          'ITERATION_NUM denotes iteration number of model which shall be run.\n'
          'IMAGE one or more images can be passed as arguments.'
          , file=sys.stderr)
    # Fix: use sys.exit() instead of the site-injected exit(); the latter is
    # only guaranteed in interactive sessions and disappears under `python -S`.
    sys.exit()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f0ee9e58c7029957af4366c984e9861db7dfc8e2
|
6ffed3351dd608ce311a7c04d813e4cbcc283d6f
|
/djongo_project/files/api/serializers.py
|
e0c2ca3d77901c61c0d12799e99a053ec5653fad
|
[] |
no_license
|
navill/ai_2-1
|
bc2311dd5e8839c6bd4112965b7348252408785a
|
7dd782b93286b7a407c433aa55a3b8f3f626fe89
|
refs/heads/master
| 2023-01-06T19:09:26.051130 | 2020-10-07T02:08:15 | 2020-10-07T02:08:15 | 290,131,119 | 0 | 0 | null | 2020-10-08T14:45:37 | 2020-08-25T06:20:46 |
Python
|
UTF-8
|
Python
| false | false | 1,396 |
py
|
from typing import *
from rest_framework import serializers
from rest_framework.reverse import reverse
from accounts.models import CommonUser
from utilities.file_utils import EncryptHandler
from files.models import CommonFile
class FileManageSerializer(serializers.ModelSerializer):
    """Create/serialize CommonFile records, exposing an encrypted download URL."""
    user = serializers.PrimaryKeyRelatedField(queryset=CommonUser.objects.all(), required=False)
    patient_name = serializers.CharField(required=True)
    file = serializers.FileField(use_url=False)
    created_at = serializers.DateTimeField(read_only=True)
    class Meta:
        model = CommonFile
        fields = ['user', 'patient_name', 'file', 'created_at']
        read_only_fields = ['user']
    def to_representation(self, instance: CommonFile) -> Dict:
        """Extend the default representation with a 'url' entry that points at
        the download view, keyed by the encrypted primary key."""
        ret = super().to_representation(instance)
        encrypted_path = self._create_encrypted_path(str(instance.id))
        encrypted_pull_url = reverse('files:download', args=[encrypted_path], request=self.context['request'])
        ret['url'] = encrypted_pull_url
        return ret
    def create(self, validated_data: dict) -> CommonFile:
        """Persist a CommonFile; any creation failure is re-raised unchanged."""
        try:
            file_obj = CommonFile.objects.create(**validated_data)
        except Exception:
            raise
        return file_obj
    def _create_encrypted_path(self, instance_id: str) -> str:
        # Obfuscates the numeric pk so download URLs are not trivially enumerable.
        handler = EncryptHandler(instance_id)
        return handler.encrypt()
|
[
"[email protected]"
] | |
59f182eac7ff61fa54275583dd65186678b519c5
|
ef34e68712fb4aa9a1320c4e1e370a24de34fcb4
|
/nlu/utils/environment/authentication.py
|
31b46bde24d6ece7e8670f0fe028b52bcba4a11a
|
[
"Apache-2.0"
] |
permissive
|
milyiyo/nlu
|
dd656e77eedf2c831482edfd4ec59490b25d3954
|
d209ed11c6a84639c268f08435552248391c5573
|
refs/heads/master
| 2023-08-16T00:03:10.326392 | 2021-10-16T03:05:49 | 2021-10-16T03:05:49 | 414,223,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,823 |
py
|
from nlu.utils.environment.env_utils import *
def install_and_import_healthcare(JSL_SECRET):
    """Install the spark-nlp-jsl PyPI package into the current environment if it
    cannot be imported, then bind the module into globals() as `sparknlp_jsl`."""
    import importlib
    try:
        importlib.import_module('sparknlp_jsl')
    except ImportError:
        import pip
        print("Spark NLP Healthcare could not be imported. Installing latest spark-nlp-jsl PyPI package via pip...")
        # The secret has the form "<version>-<token>"; the leading segment pins
        # the package version to install.
        hc_version = JSL_SECRET.split('-')[0]
        import pyspark
        pip_major_version = int(pip.__version__.split('.')[0])
        if pip_major_version in [10, 18, 19, 20]:
            # for these versions pip module does not support installing, we install via OS command.
            os.system(
                f'pip install spark-nlp-jsl=={hc_version} --extra-index-url https://pypi.johnsnowlabs.com/{JSL_SECRET}')
        else:
            pip.main(['install', f'spark-nlp-jsl=={hc_version}', '--extra-index-url',
                      f'https://pypi.johnsnowlabs.com/{JSL_SECRET}'])
    finally:
        import site
        from importlib import reload
        # Reload site so the freshly installed distribution becomes importable
        # without restarting the interpreter, then publish the module.
        reload(site)
        globals()['sparknlp_jsl'] = importlib.import_module('sparknlp_jsl')
def authenticate_enviroment(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY):
    """Export the JSL license and AWS credentials so the Spark context can read them."""
    credentials = {
        'SPARK_NLP_LICENSE': SPARK_NLP_LICENSE,
        'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID,
        'AWS_SECRET_ACCESS_KEY': AWS_SECRET_ACCESS_KEY,
    }
    for variable_name, value in credentials.items():
        os.environ[variable_name] = value
def get_authenticated_spark(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET, gpu=False, ):
    """
    Authenticates enviroment if not already done so and returns Spark Context with Healthcare Jar loaded
    0. If no Spark-NLP-Healthcare, install it via PyPi
    1. If not auth, run authenticate_enviroment()
    """
    import sparknlp
    authenticate_enviroment(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    install_and_import_healthcare(JSL_SECRET)
    import sparknlp_jsl
    # Pick the jsl.start() flavor matching the installed PySpark major/minor version.
    if is_env_pyspark_2_3(): return sparknlp_jsl.start(JSL_SECRET, spark23=True, gpu=gpu)
    if is_env_pyspark_2_4(): return sparknlp_jsl.start(JSL_SECRET, spark24=True, gpu=gpu)
    if is_env_pyspark_3_0() or is_env_pyspark_3_1():
        return sparknlp_jsl.start(JSL_SECRET, gpu=gpu, public=sparknlp.version())
    # Unsupported PySpark version: report it and fail loudly.
    print(f"Current Spark version {get_pyspark_version()} not supported!")
    raise ValueError
def is_authorized_enviroment():
    """Return True when the JSL license and both AWS secrets are set in the environment."""
    required = ('SPARK_NLP_LICENSE', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')
    return all(os.getenv(name) is not None for name in required)
|
[
"[email protected]"
] | |
71ca8542c4f7c61d9328341ec14b583e4d30c82d
|
e4920c4fe4290bde524e0c141189f80fddfe44b7
|
/info/migrations/0023_remove_message_recaptcha.py
|
85b4c72c01e9b2dc98df1f027f92d51e1b879720
|
[] |
no_license
|
ShahadatShuvo/Django_portfolio
|
42af2b0aa686bff08730cdb105f95d6b63adb620
|
795ed7cbb6444245af08582ea63f57a0f32679a0
|
refs/heads/master
| 2023-05-30T01:43:50.409584 | 2021-06-14T08:29:18 | 2021-06-14T08:29:18 | 376,372,632 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 330 |
py
|
# Generated by Django 2.2.16 on 2020-12-22 17:04
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused `recaptcha` field from the Message model."""
    dependencies = [
        ('info', '0022_auto_20201222_1655'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='message',
            name='recaptcha',
        ),
    ]
|
[
"[email protected]"
] | |
e8720a42e2d433fa822311add8bf6a44faced378
|
bda32ee120fd07499fad1e5e973249ac15861200
|
/ValidSudoku.py
|
5996590c78e0207e3f98330665a34507963f42cf
|
[] |
no_license
|
congyingTech/leetcode
|
5f76d11a283115e46fdf4f295cf0279f53e692a1
|
35ff5db1ee6abcb3cf1144a9bf5420758e31e6ec
|
refs/heads/master
| 2021-01-21T04:41:20.195451 | 2016-06-16T07:03:09 | 2016-06-16T07:03:09 | 54,643,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,772 |
py
|
#-*- coding:utf-8 -*-
'''
Created on Mar 30, 2016
@author: congyingw
'''
#Sudoku的要求有三条:每行的数只出现一次,每列的数只出现一次,每个九宫格数只出现一次。
#所以我们要验证这三个条件逐个遍历一遍。
class Solution:
    def isValidSudoku(self, board):
        """Return True when no row, column, or 3x3 box repeats a filled value.

        '.' marks an empty cell and is ignored; only duplicates among filled
        cells invalidate the board.
        """
        for idx in range(9):
            row = [board[idx][c] for c in range(9)]
            col = [board[r][idx] for r in range(9)]
            if not self.isValidList(row) or not self.isValidList(col):
                return False
        # Check each of the nine 3x3 boxes.
        for box_r in range(3):
            for box_c in range(3):
                box = [board[r][c]
                       for r in range(3 * box_r, 3 * box_r + 3)
                       for c in range(3 * box_c, 3 * box_c + 3)]
                if not self.isValidList(box):
                    return False
        return True

    def isValidList(self, xs):
        """True when the non-'.' entries of xs are pairwise distinct."""
        filled = [x for x in xs if x != '.']
        return len(filled) == len(set(filled))
if __name__ == "__main__":
    # Demo run: the first column holds two 5s, so validation prints False.
    demo_board = [
        [5, '.', '.', '.', '.', '.', '.', '.', '.'],
        [5, 2, '.', '.', '.', '.', '.', '.', '.'],
        ['.', '.', 3, '.', '.', '.', '.', '.', '.'],
        ['.', '.', '.', 4, '.', '.', '.', '.', '.'],
        ['.', '.', '.', '.', 5, '.', '.', '.', '.'],
        ['.', '.', '.', '.', '.', 6, '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.', 7, '.', '.'],
        ['.', '.', '.', '.', '.', '.', '.', 8, '.'],
        ['.', '.', '.', '.', '.', '.', '.', '.', 9],
    ]
    print(Solution().isValidSudoku(demo_board))
|
[
"[email protected]"
] | |
94bb8b2a0fb2fd8136b0743980291df09b163012
|
850001831b1fcdd4d27e328b356fc34909ca2917
|
/tests/test_map.py
|
77237924f7d6561097ffea685bfbe60e67c465bc
|
[
"BSD-3-Clause"
] |
permissive
|
yidiq7/pathos
|
b337353ccfe447866c46a4a784a7908c2f3fe31e
|
7e4fef911dc0283e245189df4683eea65bfd90f0
|
refs/heads/master
| 2022-08-24T08:43:34.009115 | 2020-05-27T12:18:21 | 2020-05-27T12:18:21 | 267,310,390 | 0 | 0 |
NOASSERTION
| 2020-05-27T12:14:50 | 2020-05-27T12:14:47 | null |
UTF-8
|
Python
| false | false | 2,170 |
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
import time
verbose = False
delay = 0.01
items = 100
def busy_add(x, y, delay=0.01):
    """Return a deterministic function of x and y after artificial busy-work.

    The per-iteration sleep in the second loop makes calls slow enough for the
    pool timing harness to measure; the arithmetic itself is deterministic.
    """
    import time
    for step in range(x):
        x += step
    for step in range(y):
        y -= step
        time.sleep(delay)
    return x + y
def timed_pool(pool, items=100, delay=0.1, verbose=False):
    """Map busy_add over a synthetic workload on *pool*, timing queue and fetch.

    Returns the list of busy_add results so callers can compare pools against
    the serial baseline.
    """
    xs = range(-items//2, items//2, 2)
    ys = range(len(xs))
    delays = [delay] * len(xs)
    if verbose: print(pool)
    queued_at = time.time()
    pending = pool.map(busy_add, xs, ys, delays)
    queue_time = time.time() - queued_at
    if verbose: print("time to queue: %s" % queue_time)
    fetch_start = time.time()
    solution = list(pending)
    fetch_time = time.time() - fetch_start
    if verbose: print("time to results: %s\n" % fetch_time)
    return solution
class BuiltinPool(object):
    """Minimal pool facade: evaluates map() serially via the builtin."""
    def map(self, *args):
        mapped = map(*args)
        return list(mapped)
std = timed_pool(BuiltinPool(), items, delay=0, verbose=False)
def test_serial():
    """SerialPool must reproduce the builtin-map baseline `std` exactly."""
    from pathos.pools import SerialPool as PS
    pool = PS()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std
def test_pp():
    """ParallelPool (ppft) must reproduce the builtin-map baseline `std`."""
    from pathos.pools import ParallelPool as PPP
    # NOTE(review): the hard-coded localhost ports look environment-specific;
    # presumably the pool falls back to local workers when unreachable — confirm.
    pool = PPP(servers=('localhost:5653','localhost:2414'))
    res = timed_pool(pool, items, delay, verbose)
    assert res == std
def test_processing():
    """ProcessPool (multiprocessing) must reproduce the baseline `std`."""
    from pathos.pools import ProcessPool as MPP
    pool = MPP()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std
def test_threading():
    """ThreadPool must reproduce the baseline `std`."""
    from pathos.pools import ThreadPool as MTP
    pool = MTP()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std
if __name__ == '__main__':
if verbose:
print("CONFIG: delay = %s" % delay)
print("CONFIG: items = %s" % items)
print("")
from pathos.helpers import freeze_support, shutdown
freeze_support()
test_serial()
test_pp()
test_processing()
test_threading()
shutdown()
|
[
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] |
mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df
|
e93d618ef2a5f5ad993261c09a6a1b7b73293570
|
0fa1d839550f4bfb1d9d0860915770071422f2cd
|
/parrot.py
|
51dd4bbc1ed740272deb7e105a164b4e9cb6f887
|
[] |
no_license
|
crystalDf/Python-Crash-Course-2nd-Edition-Chapter-07-Input
|
b996d5b5bfbf20be039ac2e2314e51d6a51545a1
|
a8838fe405e4ce70e827a6ace98f3502b3a57f45
|
refs/heads/master
| 2023-06-10T08:12:07.675473 | 2021-06-20T14:45:01 | 2021-06-20T14:45:01 | 378,668,714 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 460 |
py
|
# Echo demo: three variants of reading and repeating user input.
message = input("Tell me something, and I will repeat it back to you: ")
print(message)
prompt = "\nTell me something, and I will repeat it back to you:"
prompt += "\nEnter 'quit' to end the program. "
# Variant 1: sentinel value ('quit') tested directly in the loop condition.
message = ""
while message != 'quit':
    message = input(prompt)
    if message != 'quit':
        print(message)
# Variant 2: a boolean flag controls the loop instead of the sentinel.
active = True
while active:
    message = input(prompt)
    if message == 'quit':
        active = False
    else:
        print(message)
|
[
"[email protected]"
] | |
e3cb34e969e398b08d9c43935908f7b26d4014f0
|
000a4b227d970cdc6c8db192f4437698cb782721
|
/python/helpers/typeshed/stubs/stripe/stripe/api_resources/charge.pyi
|
2e3467e67bc5e6b90113fd9988cf281a375bde2c
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
trinhanhngoc/intellij-community
|
2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d
|
1d4a962cfda308a73e0a7ef75186aaa4b15d1e17
|
refs/heads/master
| 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 |
Apache-2.0
| 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null |
UTF-8
|
Python
| false | false | 851 |
pyi
|
from typing import Any
from stripe import api_requestor as api_requestor
from stripe.api_resources.abstract import (
CreateableAPIResource as CreateableAPIResource,
ListableAPIResource as ListableAPIResource,
UpdateableAPIResource as UpdateableAPIResource,
custom_method as custom_method,
)
class Charge(CreateableAPIResource, ListableAPIResource, UpdateableAPIResource):
    """Type stub for the Stripe Charge resource: create/list/update mixins plus
    the charge-specific actions below. Stubs declare signatures only."""
    OBJECT_NAME: str
    def capture(self, idempotency_key: Any | None = ..., **params): ...
    def refund(self, idempotency_key: Any | None = ..., **params): ...
    def update_dispute(self, idempotency_key: Any | None = ..., **params): ...
    def close_dispute(self, idempotency_key: Any | None = ..., **params): ...
    def mark_as_fraudulent(self, idempotency_key: Any | None = ...): ...
    def mark_as_safe(self, idempotency_key: Any | None = ...): ...
|
[
"[email protected]"
] | |
37ed82c45df03e22c5d1a9edd666017218ee89f1
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/zscaler/icon_zscaler/util/helpers.py
|
3a839a924b1fefd1411e0082e08af7540ce22557
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 |
MIT
| 2023-09-14T08:47:37 | 2019-06-05T17:05:12 |
Python
|
UTF-8
|
Python
| false | false | 4,440 |
py
|
from insightconnect_plugin_runtime.exceptions import PluginException
from re import sub, match, split
from typing import Union, Any, Dict
from icon_zscaler.util.constants import Assistance, Cause
CAMEL_CASE_REGEX = r"\b[a-z0-9]+([A-Z][a-z]+[0-9]*)*\b"
PASCAL_CASE_REGEX = r"\b[A-Z][a-z]+[0-9]*([A-Z][a-z]+[0-9]*)*\b"
CAMEL_CASE_ACRONYM_REGEX = r"\b[a-z0-9]+([A-Z]+[0-9]*)*\b"
def clean_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of *dictionary* with empty-ish values removed recursively.

    Nested dicts are cleaned first and dropped when they end up empty; other
    values are dropped when they equal None, "", 0, [] or {}.
    """
    result = {}
    for key, value in dictionary.items():
        if isinstance(value, dict):
            nested = clean_dict(value)
            if nested != {}:
                result[key] = nested
        elif value not in [None, "", 0, [], {}]:
            result[key] = value
    return result
def remove_password_from_result(dictionary: dict) -> dict:
    """Return a shallow copy of *dictionary* with any 'password' entry dropped."""
    sanitized = {}
    for field, content in dictionary.items():
        if field != "password":
            sanitized[field] = content
    return sanitized
def prepare_department(department_api_result: list, given_department_name: str) -> dict:
    """Return the first department dict whose name matches, else raise PluginException."""
    matching = (
        department
        for department in department_api_result
        if department.get("name") == given_department_name
    )
    found = next(matching, None)
    if found is not None:
        return found
    raise PluginException(
        cause=Cause.DEPARTMENT_NOT_FOUND,
        assistance=Assistance.VERIFY_INPUT,
    )
def prepare_groups(groups_api_result: list, given_groups_names: list) -> list:
    """Return the group dicts matching the requested names, in API order.

    Raises PluginException when any requested name is not present in the API
    result.
    """
    known_names = [group.get("name") for group in groups_api_result]
    for requested in given_groups_names:
        if requested not in known_names:
            raise PluginException(
                cause=Cause.GROUP_NOT_FOUND,
                assistance=Assistance.VERIFY_INPUT,
            )
    return [
        group
        for group in groups_api_result
        for requested in given_groups_names
        if requested == group.get("name")
    ]
def to_camel_case(provided_string: str) -> str:
    """Normalize an identifier string to camelCase.

    Already-camelCase input is returned untouched; PascalCase gets its first
    letter lowered; acronym-style input (e.g. runs of uppercase) is re-split
    and re-joined; anything else is treated as snake_case.
    """
    if match(CAMEL_CASE_REGEX, provided_string):
        return provided_string
    if match(PASCAL_CASE_REGEX, provided_string):
        return provided_string[0].lower() + provided_string[1:]
    if match(CAMEL_CASE_ACRONYM_REGEX, provided_string):
        # Split at lower->upper and upper->lower boundaries, Title-case each
        # piece, then lower the first letter of the re-joined result.
        words = split(r"(?<=[a-z0-9])(?=[A-Z])|(?<=[A-Z0-9])(?=[a-z])", provided_string)
        result = "".join([w.title() for w in words])
        return result[0].lower() + result[1:]
    # snake_case fallback: keep the first chunk lowercase, Title-case the rest.
    init, *temp = provided_string.split("_")
    result = "".join([init.lower(), *map(str.title, temp)])
    return result
def convert_dict_keys_to_camel_case(to_modify: Union[dict, list]) -> Union[dict, list]:
    """Recursively rewrite every dict key in the structure to camelCase.

    Lists are walked element-wise; scalars pass through unchanged.
    """
    if isinstance(to_modify, list):
        return [convert_dict_keys_to_camel_case(entry) for entry in to_modify]
    if isinstance(to_modify, dict):
        return {
            to_camel_case(key): convert_dict_keys_to_camel_case(value)
            for key, value in to_modify.items()
        }
    return to_modify
def filter_dict_keys(dict_to_modify: dict, keys_to_keep: list) -> dict:
    """Project *dict_to_modify* onto *keys_to_keep*; non-dict inputs pass through unchanged."""
    if isinstance(dict_to_modify, dict):
        return {
            wanted: dict_to_modify.get(wanted)
            for wanted in keys_to_keep
            if wanted in dict_to_modify
        }
    return dict_to_modify
def find_custom_url_category_by_name(url_category_name: str, url_categories_list: list) -> dict:
    """Return the custom URL category whose 'configuredName' matches the given name.

    Raises PluginException when either input is empty, when no category
    matches, or when the first match carries no 'id'.
    """
    if not url_categories_list or not url_category_name:
        raise PluginException(
            cause=Cause.CATEGORY_NOT_FOUND,
            assistance=Assistance.VERIFY_INPUT,
        )
    first_match = next(
        (
            category
            for category in url_categories_list
            if category.get("configuredName") == url_category_name
        ),
        None,
    )
    if first_match is not None and first_match.get("id"):
        return first_match
    raise PluginException(
        cause=Cause.CATEGORY_NOT_FOUND,
        assistance=Assistance.VERIFY_INPUT,
    )
def find_url_category_by_id(url_category_id: str, url_categories_list: list) -> dict:
    """Return the URL category dict whose 'id' equals *url_category_id*.

    Fix: the *url_categories_list* parameter was annotated ``str`` although the
    body (and the sibling ``find_custom_url_category_by_name``) treat it as a
    list of category dicts; the annotation now matches actual usage.

    Raises PluginException when either input is empty, when no category
    matches, or when the match carries no 'id'.
    """
    if not url_categories_list or not url_category_id:
        raise PluginException(
            cause=Cause.CATEGORY_NOT_FOUND,
            assistance=Assistance.VERIFY_INPUT,
        )
    url_category = list(filter(lambda category: category.get("id") == url_category_id, url_categories_list))
    if url_category and url_category[0].get("id"):
        return url_category[0]
    raise PluginException(
        cause=Cause.CATEGORY_NOT_FOUND,
        assistance=Assistance.VERIFY_INPUT,
    )
|
[
"[email protected]"
] | |
0292b72004bd85deca84805fc86f18693d557717
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JFLADuABfkeoz8mqN_5.py
|
418dc4c327c7b6b0b704d40cb3c93aa5c599e590
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
class Person:
    """A named person with an age, able to describe age differences."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def compare_age(self, other):
        """Return a sentence describing *other*'s age relative to self."""
        if self.age < other.age:
            modifier = 'older than'
        elif self.age > other.age:
            modifier = 'younger than'
        else:
            modifier = 'the same age as'
        return '{n} is {m} me.'.format(n=other.name, m=modifier)
|
[
"[email protected]"
] | |
ca93607b79207cc78004efd8c339bc2ab7e9d567
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5670465267826688_0/Python/yagao0o/dijkstra.py
|
96ca68f0bdc39cf5943ae362656074c360209c95
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,207 |
py
|
# author: yagoa0o
# date : 2015/04/11
class Solution():
    """Code Jam 'Dijkstra' solver: decide whether the repeated quaternion
    string can be cut into three pieces reducing to i, j and k."""
    # Quaternion multiplication table: multi_square[a][b] == a*b for the
    # unsigned units, with a leading '-' in the value carrying the sign.
    multi_square = {'1': {'1': '1', 'i': 'i', 'j': 'j', 'k': 'k'},
                    'i': {'1': 'i', 'i': '-1', 'j': 'k', 'k': '-j'},
                    'j': {'1': 'j', 'i': '-k', 'j': '-1', 'k': 'i'},
                    'k': {'1': 'k', 'i': 'j', 'j': '-i', 'k': '-1'}}
    def get_result(self, input_file_name, output_file_name):
        """Solve every case from the input file, writing 'Case #n: YES/NO' lines."""
        infile = open(input_file_name)
        outfile = open(output_file_name, "w+")
        total = int(infile.readline())
        # main procedure
        for i in range(total):
            input_parms = infile.readline().split()
            l = int(input_parms[0])
            x = int(input_parms[1])
            characters = infile.readline()
            result = 'Yes'
            if x % 4 != 0 and l > 1:
                #count characters
                cal_result = '1'
                # For large repetition counts the product cycles with period 4,
                # so x is reduced to the equivalent x % 4 + 8 repetitions.
                cal_string = characters[:l] * (x % 4 + 8) if x > 12 else characters[:l] * x
                got_i = False
                got_j = False
                for char in cal_string:
                    cal_result = self.multiply(cal_result, char)
                    # A valid split needs a prefix reducing to i, then a later
                    # prefix reducing to i*j == k, in that order.
                    if (not got_i) and cal_result == 'i':
                        got_i = True
                    if (not got_j) and got_i and cal_result == 'k':
                        got_j = True
                # The whole product must equal i*j*k == -1.
                if cal_result == '-1' and got_i and got_j:
                    result = 'YES'
                else:
                    result = 'NO'
            else:
                result = 'NO'
            outfile.write('Case #' + str(i + 1) + ': ' + result + '\n')
        infile.close()
        outfile.close()
        return False
    def multiply(self, a, b):
        """Return the product of two signed quaternion units ('1', 'i', '-j', ...)."""
        is_negative = False
        # XOR together the sign bits of both operands and of the table entry.
        is_negative = is_negative != (a[0] == '-')
        is_negative = is_negative != (b[0] == '-')
        result = self.multi_square[a[-1]][b[-1]]
        is_negative = is_negative != (result[0] == '-')
        if not is_negative:
            return result[-1]
        else:
            return '-' + result[-1]
solu = Solution()
file_name = 'C-small-attempt2'
solu.get_result(file_name + '.in', file_name + '.out')
|
[
"[email protected]"
] | |
9b3c43e2159eccae5ed0ba80f67a626831e04c62
|
722d8f2aa69095dbdbe32ecdeebb4bcf20e2ea3c
|
/tests/test_issue10_17.py
|
e54ddd56630de1e5e811d12e7ec3f3da0d804333
|
[] |
permissive
|
clach04/pyqrcodeNG
|
1bad85bea9c2a5ca9bcda8849e35af51f8e394f3
|
8033a915eca1946537002b8b271ea2cddb4e004b
|
refs/heads/master
| 2022-04-23T00:18:36.478138 | 2020-01-21T22:28:44 | 2020-01-21T22:28:44 | 259,402,644 | 0 | 0 |
BSD-3-Clause
| 2020-04-27T17:24:36 | 2020-04-27T17:24:35 | null |
UTF-8
|
Python
| false | false | 424 |
py
|
# -*- coding: utf-8 -*-
"""\
Tests against <https://github.com/mnooner256/pyqrcode/issues/17> and
<https://github.com/heuer/pyqrcode/issues/10>
Unicode issues.
"""
from __future__ import unicode_literals
import pyqrcodeng as pyqrcode
def test_issue_10_17():
    """A payload with a non-ASCII apostrophe must select binary mode."""
    qr = pyqrcode.create('John’s Pizza')
    assert qr
    assert 'binary' == qr.mode
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
[
"[email protected]"
] | |
9148b220bb576626f27c0f2cfb3cb25ebbcd7139
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/datatypes/facets/byte/byte_max_inclusive001_xsd/__init__.py
|
0444a6966ba5abae09b921e38485930982799bb6
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 173 |
py
|
from output.models.ms_data.datatypes.facets.byte.byte_max_inclusive001_xsd.byte_max_inclusive001 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
|
[
"[email protected]"
] | |
274befd704407b98b615b5efb59b5c392ba2d396
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/sql/outbound_firewall_rule.py
|
5a8309fc8ccaf95892a167a53395faad58b8bd6a
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 |
Apache-2.0
| 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null |
UTF-8
|
Python
| false | false | 7,974 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['OutboundFirewallRuleArgs', 'OutboundFirewallRule']
@pulumi.input_type
class OutboundFirewallRuleArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 server_name: pulumi.Input[str],
                 outbound_rule_fqdn: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a OutboundFirewallRule resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input[str] outbound_rule_fqdn: Optional; omitted from the stored arguments when not provided.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "server_name", server_name)
        if outbound_rule_fqdn is not None:
            pulumi.set(__self__, "outbound_rule_fqdn", outbound_rule_fqdn)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> pulumi.Input[str]:
        """
        The name of the server.
        """
        return pulumi.get(self, "server_name")
    @server_name.setter
    def server_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "server_name", value)
    @property
    @pulumi.getter(name="outboundRuleFqdn")
    def outbound_rule_fqdn(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): generated code ships no docstring for this property;
        # presumably the FQDN this outbound firewall rule allows — confirm
        # against the Azure SQL API before documenting as fact.
        return pulumi.get(self, "outbound_rule_fqdn")
    @outbound_rule_fqdn.setter
    def outbound_rule_fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "outbound_rule_fqdn", value)
class OutboundFirewallRule(pulumi.CustomResource):
    # Generated Pulumi resource class. The two @overload stubs below only
    # document the supported call signatures; dispatch happens in the real
    # __init__ further down, which forwards to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 outbound_rule_fqdn: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An Azure SQL DB Server Outbound Firewall Rule.
        API Version: 2021-02-01-preview.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] server_name: The name of the server.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: OutboundFirewallRuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure SQL DB Server Outbound Firewall Rule.
        API Version: 2021-02-01-preview.
        :param str resource_name: The name of the resource.
        :param OutboundFirewallRuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Figure out which overload the caller used (args object vs. kwargs)
        # and normalize both paths into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(OutboundFirewallRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    # Shared implementation behind both __init__ overloads.
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 outbound_rule_fqdn: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag. (With opts.id set, this is a lookup instead.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = OutboundFirewallRuleArgs.__new__(OutboundFirewallRuleArgs)
            __props__.__dict__["outbound_rule_fqdn"] = outbound_rule_fqdn
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if server_name is None and not opts.urn:
                raise TypeError("Missing required property 'server_name'")
            __props__.__dict__["server_name"] = server_name
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases keep URNs stable across the legacy azure-nextgen names and
        # the API-versioned type token.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql:OutboundFirewallRule"), pulumi.Alias(type_="azure-native:sql/v20210201preview:OutboundFirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20210201preview:OutboundFirewallRule")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(OutboundFirewallRule, __self__).__init__(
            'azure-native:sql:OutboundFirewallRule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'OutboundFirewallRule':
        """
        Get an existing OutboundFirewallRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Only output properties are declared; their values come from the
        # provider when the resource is read.
        __props__ = OutboundFirewallRuleArgs.__new__(OutboundFirewallRuleArgs)
        __props__.__dict__["name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["type"] = None
        return OutboundFirewallRule(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The state of the outbound rule.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
|
[
"[email protected]"
] | |
fbc8f567fc64ca0f8626efc764da3297fb9e5918
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_soft_decisions_to_data_then_nn.py
|
e916a8734c524c83772576e38418e6667fa6b0c9
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 |
Apache-2.0
| 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null |
UTF-8
|
Python
| false | false | 1,954 |
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticSoftDecisionsToDataThenNN(
    hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
  """A hybrid model that samples paths when training."""

  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):
    # Reuse the parent's setup (params/device/optimizer wiring), then replace
    # the layer stack with this model's own two-stage pipeline.
    super(StochasticSoftDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)
    # Layer 0: stochastic soft decisions-to-data transform; layer 1: a fully
    # connected layer consuming its output.
    self.layers = [decisions_to_data.StochasticSoftDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)]
|
[
"[email protected]"
] | |
290d3ea467c81eb4adcf2a72c26529c2a9e07eb4
|
1a2ca64839723ede3134a0781128b0dc0b5f6ab8
|
/ExtractFeatures/Data/jsinix/Problem10.py
|
8f8dd302f291fb441406f1d44ec06aae2c72032e
|
[] |
no_license
|
vivekaxl/LexisNexis
|
bc8ee0b92ae95a200c41bd077082212243ee248c
|
5fa3a818c3d41bd9c3eb25122e1d376c8910269c
|
refs/heads/master
| 2021-01-13T01:44:41.814348 | 2015-07-08T15:42:35 | 2015-07-08T15:42:35 | 29,705,371 | 9 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
py
|
# Question: Summation of primes
# Problem 10
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
# Answer: 142913828922
#!/usr/bin/python


def sum_primes_below(limit):
    """Return the sum of all primes strictly below ``limit``.

    Uses a Sieve of Eratosthenes (O(n log log n)) instead of the original
    per-number trial division, and fixes two bugs in the original script:
    the prime 2 was never counted (the loop started at 3), and the final
    statement used Python 2 ``print`` syntax.
    """
    if limit <= 2:
        return 0
    from math import isqrt  # local import: this script has no import section
    composite = bytearray(limit)  # composite[i] truthy => i is not prime
    composite[0] = composite[1] = 1
    for p in range(2, isqrt(limit - 1) + 1):
        if not composite[p]:
            # Mark every multiple of p starting at p*p (smaller multiples
            # were already marked by smaller primes).
            composite[p * p::p] = b'\x01' * len(range(p * p, limit, p))
    return sum(i for i in range(limit) if not composite[i])


if __name__ == "__main__":
    # Expected output: 142913828922
    print(sum_primes_below(2000000))
|
[
"[email protected]"
] | |
abdf25273170a0e464ee6c988a08c42a21bbd8b0
|
f3a1629a46f5c3cbf7314c54fc36be3156146517
|
/venv/bin/sqlformat
|
f01aa4e0e6285b25c7aec0571805920953267bed
|
[] |
no_license
|
AlexsandroMO/qualiy_applications
|
ec4cdbcbacc9f403d7d34ca9573af44df9c9230a
|
08656c8368f10d54e5b9c8e4a758989239224dc6
|
refs/heads/main
| 2023-01-13T03:50:21.779274 | 2020-11-14T19:05:20 | 2020-11-14T19:05:20 | 305,471,783 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 284 |
#!/Users/alexsandromonteiro/Desktop/Prog_Python/qualiy_applications/venv/bin/python3.8
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools: delegates to sqlparse's CLI.
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] before handing control to sqlparse's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | ||
da8b517d85a513db952f23fb60cba730404ee688
|
3c8cc407d7547f8702bfe06deb5f1c9087778ce3
|
/hiword/dataloader.py
|
4d6c1c1a4918a7d6da18e55b56ca7658f88ceae8
|
[
"Apache-2.0"
] |
permissive
|
jadbin/hiword
|
accaebbdee899e8e3ed11e024d6c488bca36c445
|
7789412747a2b6b59ee974f2a2efd57e355e3282
|
refs/heads/master
| 2023-04-06T21:57:17.260338 | 2023-03-15T16:21:49 | 2023-03-15T16:21:49 | 166,003,998 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,158 |
py
|
import os
from collections import defaultdict
from os import listdir
from os.path import join, dirname, isdir
class DictLoader:
    """Loads word frequencies from the bundled ``data/dicts/*.txt`` files."""

    def __init__(self):
        self.dict = self._load_dict()

    def word_freq(self, word):
        """Return the recorded frequency of ``word`` (0 for unknown words)."""
        return self.dict[word]

    @staticmethod
    def _load_dict():
        """Merge every dictionary file, keeping the max frequency per word."""
        freqs = defaultdict(int)
        dict_dir = join(dirname(__file__), 'data', 'dicts')
        for entry in os.listdir(dict_dir):
            if not entry.endswith('.txt'):
                continue
            with open(join(dict_dir, entry)) as fh:
                for line in fh:
                    parts = line.split()
                    freqs[parts[0]] = max(freqs[parts[0]], int(parts[1]))
        return freqs
class IDFLoader:
    """Loads inverse-document-frequency weights from ``data/idf.txt``."""

    def __init__(self):
        self.idf = self._load_idf()
        # The median IDF is the fallback weight for unseen words.
        self.median_idf = sorted(self.idf.values())[len(self.idf) // 2]

    def word_idf(self, word):
        """Return the IDF weight of ``word``, or the median for unknowns."""
        return self.idf.get(word, self.median_idf)

    @staticmethod
    def _load_idf():
        """Parse "<word> <weight>" lines into a dict."""
        table = {}
        idf_path = join(dirname(__file__), 'data', 'idf.txt')
        with open(idf_path) as fh:
            for line in fh:
                parts = line.split()
                table[parts[0]] = float(parts[1])
        return table
class StopwordsLoader:
    """Loads the stopword set from ``data/stopwords`` (a file or a directory)."""

    def __init__(self):
        self.stopwords = self._load_stopwords()

    def is_stopword(self, word):
        return word in self.stopwords

    def remove(self, word):
        # Raises KeyError when the word is absent (set.remove semantics).
        self.stopwords.remove(word)

    @staticmethod
    def _load_stopwords():
        """Collect stripped lines from the stopword file(s) into a set."""
        root = join(dirname(__file__), 'data', 'stopwords')
        if isdir(root):
            sources = [join(root, name)
                       for name in listdir(root) if name.endswith('.txt')]
        else:
            sources = [root]
        words = set()
        for path in sources:
            with open(path) as fh:
                for line in fh:
                    words.add(line.strip())
        return words
|
[
"[email protected]"
] | |
db0ec3865a75078674e752309dad2296cd1bbd26
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/计算几何/直线/practice/面试题 16.14. 最佳直线 copy.py
|
9a95efaf08f17ae51ca4cf2e9edb0dae3b4f6f05
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,033 |
py
|
from typing import List
from collections import defaultdict
def gcd(a, b):
    # Iterative form of the original recursion ``gcd(b % a, b)``. Note the
    # second argument is never reduced, so the returned value is always b.
    while a != 0:
        a = b % a
    return b
class Solution:
    def bestLine(self, points: List[List[int]]) -> List[int]:
        """Return the first pair of indices [i, j] lying on the line that
        passes through the most points (LeetCode 16.14).

        Fix: the module-level ``gcd`` helper is broken — its second argument
        is never reduced, so it always returns its second argument, which
        made the grouping keys float slopes (A/B) subject to rounding. Here
        each direction vector (A, B) is normalized exactly with ``math.gcd``
        and a canonical sign, so keys are exact integer pairs.
        """
        from math import gcd as _gcd  # local import: file imports no math

        n = len(points)
        res = []
        maxCount = 0
        for i in range(n):
            x1, y1 = points[i]
            counter = defaultdict(int)
            firstpair = {}
            for j in range(i + 1, n):
                x2, y2 = points[j]
                A, B = y2 - y1, x2 - x1
                if B == 0:
                    # Vertical line through point i.
                    key = (0, 0)
                else:
                    if B < 0:
                        # Canonical sign: keep the denominator positive so
                        # opposite direction vectors share one key.
                        A, B = -A, -B
                    g = _gcd(A, B)  # math.gcd handles negative A
                    key = (A // g, B // g)
                counter[key] += 1
                firstpair.setdefault(key, [i, j])
                if counter[key] > maxCount:  # strictly more: keep first pair on ties
                    maxCount = counter[key]
                    res = firstpair[key]
        return res
# Ad-hoc smoke test: prints the first index pair of the most-populated line
# for this fixed point set (runs on import of the module).
print(
    Solution().bestLine(
        [
            [-24272, -29606],
            [-37644, -4251],
            [2691, -22513],
            [-14592, -33765],
            [-21858, 28550],
            [-22264, 41303],
            [-6960, 12785],
            [-39133, -41833],
            [25151, -26643],
            [-19416, 28550],
            [-17420, 22270],
            [-8793, 16457],
            [-4303, -25680],
            [-14405, 26607],
            [-49083, -26336],
            [22629, 20544],
            [-23939, -25038],
            [-40441, -26962],
            [-29484, -30503],
            [-32927, -18287],
            [-13312, -22513],
            [15026, 12965],
            [-16361, -23282],
            [7296, -15750],
            [-11690, -21723],
            [-34850, -25928],
            [-14933, -16169],
            [23459, -9358],
            [-45719, -13202],
            [-26868, 28550],
            [4627, 16457],
            [-7296, -27760],
            [-32230, 8174],
            [-28233, -8627],
            [-26520, 28550],
            [5515, -26001],
            [-16766, 28550],
            [21888, -3740],
            [1251, 28550],
            [15333, -26322],
            [-27677, -19790],
            [20311, 7075],
            [-10751, 16457],
            [-47762, -44638],
            [20991, 24942],
            [-19056, -11105],
            [-26639, 28550],
            [-19862, 16457],
            [-27506, -4251],
            [-20172, -5440],
            [-33757, -24717],
            [-9411, -17379],
            [12493, 29906],
            [0, -21755],
            [-36885, -16192],
            [-38195, -40088],
            [-40079, 7667],
            [-29294, -34032],
            [-55968, 23947],
            [-22724, -22513],
            [20362, -11530],
            [-11817, -23957],
            [-33742, 5259],
            [-10350, -4251],
            [-11690, -22513],
            [-20241, -22513],
        ]
    )
)
|
[
"[email protected]"
] | |
d1d2cb8fa62f4abf11d0f3c031e100adb3008d82
|
6ed01f4503fc9de234a561c945adff7cf4b1c81b
|
/ncar_lib/incites_authors/incites_people_reporter.py
|
24328f4c760673ff83a6c2cbdde1365ae3f9d3f8
|
[] |
no_license
|
ostwald/python-lib
|
b851943c913a68424a05ce3c7b42878ff9519f68
|
9acd97ffaa2f57b3e9e632e1b75016549beb29e5
|
refs/heads/master
| 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 |
Roff
|
UTF-8
|
Python
| false | false | 5,465 |
py
|
"""
"""
import os, sys
from UserList import UserList
from find_person import PersonFinder, resolveFullName
from data_filter import FilteredAuthorData
from HyperText.HTML40 import *
from html import HtmlDocument
class InCitesAuthorInfo:
    """Bundles one inCites author name with its peopleDB match candidates.

    Name fields are copied straight off the ``finder`` (name data derived
    from the inCites author record); ``matches`` come from peopleDB.
    """

    def __init__(self, id, finder, matches):
        self.id = id
        # Mirror the finder's parsed name fields onto this record.
        for field in ('fullname', 'firstName', 'middleName', 'lastName', 'note'):
            setattr(self, field, getattr(finder, field))
        self.matches = matches
        self.numMatches = len(matches)
class SimpleReporter (UserList):
def __init__ (self):
self.data = []
self.recordsToReport = 10
self.errors = []
self.notes = []
self.people = FilteredAuthorData().people # UniquePeople
# self.report()
self.getAuthorMatchInfo()
def getAuthorMatchInfo (self):
person_counter = 1
max_count = self.recordsToReport or len(self.people.keys())
for fullname in self.people.keys()[:max_count]:
try:
finder = PersonFinder (fullname)
candidates = finder.candidates
id = 'author-%d' % person_counter
person_counter += 1
# print 'processing authorInfo for ' + fullname
authorInfo = InCitesAuthorInfo (id, finder, candidates)
self.append(authorInfo)
except KeyError, e:
print 'ERROR', e
self.errors.append(fullname + ": " + str(e))
def report(self):
for authorInfo in self:
try:
if authorInfo.numMatches == 1:
continue
if authorInfo.note:
self.notes.append(authorInfo.note)
## print '\n%s (%d)' % (fullname, size)
print "\n%d candidates found for '%s' (%s | %s)" % \
(len(authorInfo.matches),
authorInfo.fullname,
authorInfo.lastName,
authorInfo.firstName)
for person in authorInfo.matches:
print '- ', person
except Exception, e:
self.errors.append(authorInfo.fullname + ": " + str(e))
def showErrors (self):
if self.errors:
print '\nNames that could not be parsed'
for error in self.errors:
print error
else:
print '\nAll names were parsed'
def showNotes(self):
if self.notes:
print '\nNotes'
for note in notes:
print note
else:
print '\nNo notes generated'
class HtmlReporter (SimpleReporter):
    """Renders the author-match report as an HTML document (HyperText DOM).

    Legacy Python 2 module; code kept byte-identical.
    """
    results_columns = ['numMatches', 'inCitesName', 'peopleDBlink']

    def __init__ (self):
        SimpleReporter.__init__ (self)
        self.htmlDoc = None  # built lazily by asHtmlDoc()
        print '%d authorInfo instances' % len(self.data)

    def asHtmlDoc (self):
        """Return the report as an HtmlDocument, building it on first call."""
        if self.htmlDoc is None:
            mockup_link = Href ("../reporter-mockup.html", 'to Mockup')
            reportTable = self.makeReportHtml()
            # Client-side scripts wired into the page (toggling, decoration).
            javascript = [
                'javascript/prototype.js',
                'javascript/scriptaculous-js-1.9.0/scriptaculous.js',
                'javascript/toggler.js',
                'javascript/decorate_upids.js'
                ]
            self.htmlDoc = HtmlDocument(mockup_link,
                                        reportTable,
                                        title="inCites Author Reporter",
                                        stylesheet="styles.css",
                                        javascript=javascript)
        return self.htmlDoc

    def getInCitesAuthorInfo (self, authorInfo):
        """
        make the html for a inCitesAuthor & its matches
        """
        print 'getInCitesAuthorInfo with ' + authorInfo.fullname
        id = authorInfo.id
        # Authors with matches get an expandable "toggler" container.
        togglerClass = authorInfo.matches and "toggler" or ""
        toggler = DIV (id=id, klass=togglerClass)
        if authorInfo.numMatches > 0:
            togglerLnkClass = "inCitesAuthor togglerClosed"
        else:
            togglerLnkClass = "inCitesAuthor noTogglerClosed"
        # print "%s %s" % (authorInfo.inCitesName, authorInfo.numMatches)
        togglerLnk = DIV(id='toggler-lnk-'+id, klass=togglerLnkClass)
        toggler.append(togglerLnk)
        authorTable = TABLE(klass="authorTable")
        togglerLnk.append(authorTable)
        matchesContent = '(%d matches)' % authorInfo.numMatches
        authorTable.append(
            TR(
                TD (authorInfo.fullname, klass="author"),
                TD (matchesContent, klass="matches")
            )
        )
        if authorInfo.numMatches > 0:
            # Hidden-by-default container listing the peopleDB candidates.
            togglerCon = DIV(id='toggler-con-'+id, style="display:none")
            toggler.append(togglerCon)
            matchTable = TABLE(klass="matchTable")
            togglerCon.append(matchTable)
            for match in authorInfo.matches:
                match_row = TR (klass="peopleDBmatch", id=match.upid)
                match_row.append (
                    TD (match.getName(), klass="match-name"),
                    TD (match.upid, klass="match-upid"))
                matchTable.append(match_row);
        return toggler

    def makeReportHtml (self):
        """Assemble the per-author HTML sections into one report DIV."""
        report = DIV (id='reportTable')
        # NOTE(review): person_counter/max_count are computed but never used
        # here (they matter in SimpleReporter.getAuthorMatchInfo).
        person_counter = 1
        max_count = self.recordsToReport or len(self.people.keys())
        for authorInfo in self:
            # print authorInfo.fullname, authorInfo.numMatches
            try:
                if authorInfo.numMatches == 1:
                    continue
                if authorInfo.note:
                    self.notes.append(authorInfo.note)
                # print 'processing authorInfo for ' + fullname
                report.append (HR (klass="divider"))
                report.append(self.getInCitesAuthorInfo (authorInfo))
            except Exception, e:
                self.errors.append(authorInfo.fullname + ": " + str(e))
        return report

    def writeHtmlDoc (self, path=None):
        """Write the rendered HTML report to ``path`` (default report file)."""
        path = path or "report_html/INCITES_REPORT.html"
        fp = open (path, 'w')
        fp.write (self.asHtmlDoc().__str__())
        fp.close()
        print "wrote to " + path
if __name__ == '__main__':
    # findPeople()
    # Toggle the if 0/if 1 guards to pick plain-text vs HTML reporting.
    if 0:
        reporter = SimpleReporter()
        reporter.report()
    if 1:
        reporter = HtmlReporter()
        print reporter.asHtmlDoc()
        # reporter.writeHtmlDoc()
        reporter.showErrors()
        reporter.showNotes()
        # print reporter.asHtmlDoc()
|
[
"[email protected]"
] | |
e5159573514325ddff9f0569957cbbf720a52f0e
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/B_HashTable/Basic/L1_2593_Find_Score_of_an_Array_After_Marking_All_Elements.py
|
c21970e05cffcfd0aa4f1a71aaa041f071a3ed8a
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 |
Python
|
UTF-8
|
Python
| false | false | 493 |
py
|
""" https://leetcode.com/problems/find-score-of-an-array-after-marking-all-elements/
Implement the problem description using a seen array
"""
from header import *
class Solution:
    def findScore(self, A: List[int]) -> int:
        """Sum the values picked by repeatedly taking the smallest unmarked
        element (smallest index on ties) and marking it and its neighbors.

        Two padding cells keep the neighbor marks in-bounds; marking
        index -1 wraps onto the trailing padding, just like the original.
        """
        marked = bytearray(len(A) + 2)
        score = 0
        # Stable sort: equal values are visited in index order.
        for idx in sorted(range(len(A)), key=A.__getitem__):
            if not marked[idx]:
                score += A[idx]
                marked[idx - 1] = marked[idx] = marked[idx + 1] = 1
        return score
|
[
"[email protected]"
] | |
6831b0fbb7a6dadcaef55a6df4497df57ec91df1
|
2b45cbccd03fb09be78b2241d05beeae171a2e18
|
/字节跳动测试开发工程师面试准备/reverseList.py
|
58fa0f6880134d2ea79618baafedb614640d8d8f
|
[] |
no_license
|
MaoningGuan/LeetCode
|
c90f78ce87a8116458a86c49dbe32e172036f7b4
|
62419b49000e79962bcdc99cd98afd2fb82ea345
|
refs/heads/master
| 2023-01-03T14:52:04.278708 | 2020-11-01T12:15:41 | 2020-11-01T12:15:41 | 282,859,997 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,567 |
py
|
# -*- coding: utf-8 -*-
"""
206. 反转链表
反转一个单链表。
示例:
输入: 1->2->3->4->5->NULL
输出: 5->4->3->2->1->NULL
进阶:
你可以迭代或递归地反转链表。你能否用两种方法解决这道题?
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def generateList(l: list) -> ListNode:
prenode = ListNode(0)
lastnode = prenode
for val in l:
lastnode.next = ListNode(val)
lastnode = lastnode.next
return prenode.next
def printList(l: ListNode):
while l:
print("%d, " % (l.val), end='')
l = l.next
print('')
class Solution:
def reverseList_recursive(self, head: ListNode) -> ListNode:
"""
递归解法
:type head: ListNode
:rtype: ListNode
"""
# 递归终止条件是当前为空,或者下一个节点为空
if (head == None or head.next == None):
return head
# 这里的cur就是最后一个节点
cur = self.reverseList_recursive(head.next)
# 这里请配合动画演示理解
# 如果链表是 1->2->3->4->5,那么此时的cur就是5
# 而head是4,head的下一个是5,下下一个是空
# 所以head.next.next 就是5->4
head.next.next = head
# 防止链表循环,需要将head.next设置为空(如:5 -> 4 -> 3 -> 2 <-> 1)
# if head.val == 1:
# head.next = None
head.next = None
# 每层递归函数都返回cur,也就是最后一个节点
return cur
def reverseList_iterate(self, head: ListNode) -> ListNode:
"""
迭代解法
:type head: ListNode
:rtype: ListNode
"""
# 申请两个节点,pre和 cur,pre指向None
pre = None
cur = head
# 遍历链表,while循环里面的内容其实可以写成一行
# 这里只做演示,就不搞那么骚气的写法了
while cur:
# pre, cur.next, cur = cur, pre, cur.next
# 记录当前节点的下一个节点
tmp = cur.next
# 然后将当前节点指向pre
cur.next = pre
# pre和cur节点都前进一位
pre = cur
cur = tmp
return pre
if __name__ == '__main__':
solution = Solution()
nodes = [1, 2, 3, 4, 5]
linked_list = generateList(nodes)
reversed_link = solution.reverseList_iterate(linked_list)
printList(reversed_link)
|
[
"[email protected]"
] | |
26c93247ee4200baadf6355f694c14262a0ea35e
|
0d09e32620e2e82f243ba86e2cc7bec19e521b1b
|
/Exercises/01-Sequential-Structure/ex06.py
|
ac27e05e4d8dd83905a5665183e94f38cefa3c27
|
[] |
no_license
|
hikarocarvalho/Python_Wiki
|
401f7466377f2053cda8bfa850afd0bd64cce047
|
01f755ecc18de13a9ded794ece9e7a8bd4ad7d9e
|
refs/heads/main
| 2023-06-14T08:37:10.728067 | 2021-07-12T20:24:41 | 2021-07-12T20:24:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
py
|
# Circle-area exercise (comments translated from pt-BR).


def circle_area(radius, pi=3.14):
    """Return the area of a circle with the given radius.

    ``pi`` defaults to 3.14 to reproduce the original exercise's output;
    pass ``math.pi`` for full precision.
    """
    return pi * radius ** 2


if __name__ == "__main__":
    # Guarded main block: importing this module no longer blocks on input().
    # The user gives the radius ("ray" in the original wording).
    ray = float(input("Enter with the ray value: "))
    # Show the result (same message as the original script).
    print("The area of this circle is:", circle_area(ray))
|
[
"[email protected]"
] | |
65e75880a88ed030115589f4646788aa413fd91e
|
7f7213fe407f252b2323025c9b9e381a73474b7d
|
/drag_sheet/sds/check_files.py
|
077feba9a15d97280ae0c711e7c9b67f9866a925
|
[] |
no_license
|
ahy3nz/graphene_build
|
0ce62c2123b8c39248048d2cafbd0aafdd06ff9a
|
44590b8db799136929fc06e490151f450ad30029
|
refs/heads/master
| 2021-06-20T07:28:20.958807 | 2019-07-03T19:48:24 | 2019-07-03T19:48:24 | 135,504,561 | 0 | 1 | null | 2018-08-06T21:47:17 | 2018-05-30T22:40:17 |
Python
|
UTF-8
|
Python
| false | false | 572 |
py
|
import os
import itertools as it

# Report simulation directories that exist but are missing their pull.gro,
# and directories that are missing entirely.
curr_dir = os.getcwd()
sds_folders = ['10sds', '20sds', '30sds', '40sds', '50sds', '60sds',
               '70sds', '80sds', '90sds', '100sds']
k_folders = ['k50']
angle_folders = ['0']
trials = ['a', 'b', 'c']
for combo in it.product(sds_folders, k_folders, angle_folders, trials):
    sim_dir = os.path.join(curr_dir, '{0}/{1}_{2}_{3}'.format(*combo))
    if os.path.isdir(sim_dir):
        # Check for pull.gro via an absolute path instead of the original
        # os.chdir(), which mutated the process cwd and was never undone.
        if not os.path.isfile(os.path.join(sim_dir, 'pull.gro')):
            print(sim_dir)
    else:
        print("{} doesn't exist".format(sim_dir))
|
[
"[email protected]"
] | |
1cb4aefd9a5b1077a5c844f51cf26e0e25ef605b
|
f6761bd4b74ed9c3bc0e8f62e5a1db70c03096f0
|
/torch/nn/quantized/dynamic/modules/linear.py
|
7574dd53eb761570aa1b6445bc8593eba60de6c2
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MarisaKirisame/pytorch
|
b638790a0997d776ad4c5e4c77badc77e5dc94f9
|
59c5de4d0eda8d4f5494602034093933600d0a3d
|
refs/heads/master
| 2021-06-19T10:44:33.846286 | 2019-10-31T22:56:55 | 2019-10-31T22:58:28 | 218,881,408 | 2 | 0 |
NOASSERTION
| 2019-11-01T00:02:51 | 2019-11-01T00:02:51 | null |
UTF-8
|
Python
| false | false | 3,423 |
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from ....modules.linear import Linear as NNLinear
import torch.nn.quantized as nnq
class Linear(nnq.Linear):
    r"""
    A dynamic quantized linear module with quantized tensor as inputs and outputs.
    We adopt the same interface as `torch.nn.Linear`, please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
    Similar to :class:`torch.nn.Linear`, attributes will be randomly
    initialized at module creation time and will be overwritten later
    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module which are of
                         shape :math:`(\text{out\_features}, \text{in\_features})`.
        bias (Tensor):   the non-learnable bias of the module of shape :math:`(\text{out\_features})`.
                         If :attr:`bias` is ``True``, the values are initialized to zero.
    Examples::
        >>> m = nn.quantized.dynamic.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    def __init__(self, in_features, out_features, bias_=True):
        super(Linear, self).__init__(in_features, out_features, bias_)
        # We don't muck around with buffers or attributes or anything here
        # to keep the module simple. *everything* is simply a Python attribute.
        # Serialization logic is explicitly handled in the below serialization and
        # deserialization modules
    def forward(self, x):
        # Note that we can handle self.bias == None case.
        Y = torch.ops.quantized.linear_dynamic(
            x, self._packed_params)
        # Cast the result back to the caller's dtype.
        return Y.to(x.dtype)
    def _get_name(self):
        # Name shown in module repr / printouts.
        return 'DynamicQuantizedLinear'
    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
    @classmethod
    def from_float(cls, mod):
        r"""Create a dynamic quantized module from a float module or qparams_dict
        Args:
            mod (Module): a float module, either produced by torch.quantization
                          utilities or provided by the user
        """
        assert type(mod) == NNLinear, 'nn.quantized.dynamic.Linear.from_float only works for nn.Linear'
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        if mod.qconfig is not None and mod.qconfig.weight is not None:
            weight_observer = mod.qconfig.weight()
        else:
            # We have the circular import issues if we import the qconfig in the beginning of this file:
            # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
            # import until we need it.
            from torch.quantization.qconfig import default_dynamic_qconfig
            weight_observer = default_dynamic_qconfig.weight()
        assert weight_observer.dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
        # Run the observer over the float weight, then quantize per-tensor
        # with the scale/zero-point it derived.
        weight_observer(mod.weight)
        wt_scale, wt_zp = weight_observer.calculate_qparams()
        qweight = torch.quantize_per_tensor(mod.weight.float(), float(wt_scale), int(wt_zp), torch.qint8)
        qlinear = Linear(mod.in_features, mod.out_features)
        qlinear.set_weight_bias(qweight, mod.bias)
        return qlinear
|
[
"[email protected]"
] | |
094f154dc9007753efa071553ad662baa9cb66f4
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v10/enums/types/matching_function_operator.py
|
11a366438b14b5a9625600a5fa27c1e72a1abe49
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 |
Apache-2.0
| 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto plumbing: binds the generated message below to its package and
# marshal registry; the manifest lists this module's exported messages.
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.enums",
    marshal="google.ads.googleads.v10",
    manifest={
        "MatchingFunctionOperatorEnum",
    },
)


class MatchingFunctionOperatorEnum(proto.Message):
    r"""Container for enum describing matching function operator."""

    class MatchingFunctionOperator(proto.Enum):
        r"""Possible operators in a matching function."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        IN = 2
        IDENTITY = 3
        EQUALS = 4
        AND = 5
        CONTAINS_ANY = 6


# The public API is exactly the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"[email protected]"
] | |
233695bb1c57dade93d46c11765d5914bc3e29e0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03039/s199470028.py
|
5afbcf0436e1e2a41276ee67cfaa4072153695af
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 673 |
py
|
from itertools import combinations
def solve(N, M, K):
#A = [(n, m) for n in range(N) for m in range(M)]
r = 0
for x in range(N):
r += x * (N - x) * M * M
for y in range(M):
r += y * (M - y) * N * N
return r
def main():
N, M, K = map(int, input().split())
g1 = [1, 1]
g2 = [1, 1]
inverse = [0, 1]
mod = 10 ** 9 + 7
for i in range(2, N * M):
g1.append((g1[-1] * i) % mod)
inverse.append((-inverse[mod % i] * (mod // i)) % mod)
g2.append((g2[-1] * inverse[-1]) % mod)
t = solve(N, M, 2)
for k in range(2, K):
t = t * (N * M - k) * inverse[k - 1] % mod
print(t)
main()
|
[
"[email protected]"
] | |
6aa35f7716f196962697548423b8318a68aeb789
|
981e6d9d34a91852407d45c4b7863779e228a516
|
/venv/bin/django-admin.py
|
92b413158aeac93a5d38ab2670da12a6bbeeaa4c
|
[] |
no_license
|
starwayagency/astrolabium_viber_bot
|
5062ffcb7b35b3608f9434fd486e5806e9084ae1
|
ec4e699bbc32e7275da0f12d77a0ae5cf32d000e
|
refs/heads/master
| 2023-08-18T06:36:43.315701 | 2021-10-24T18:04:31 | 2021-10-24T18:04:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
#!/home/jurgeon/projects/astrolabium/astrolabium_viber_bot/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# Importing the warning class doubles as a Django-version check: on Django
# >= 4.0 the import fails and we surface a removal message instead.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn, then delegate to the regular django-admin entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
87a8307caea5976b9dea43adb38dbb519f275bcd
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/jakub/nonlocal_averaging/2d_rotation.py
|
44dd801a3538e23552901d09a1db0e020abcbe31
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null |
UTF-8
|
Python
| false | false | 5,857 |
py
|
from ibvpy.api import \
TStepper as TS, RTraceGraph, TLoop, \
TLine, BCDof, IBVPSolve as IS, DOTSEval, FEDomain, FERefinementGrid,\
FEGrid, BCSlice
from apps.scratch.jakub.mlab.mlab_trace import RTraceDomainListField
from ibvpy.mats.mats2D.mats2D_sdamage.mats2D_sdamage import MATS2DScalarDamage
from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic
from ibvpy.mats.mats2D.mats2D_sdamage.strain_norm2d import Euclidean, Mazars, Rankine
from ibvpy.fets.fets2D.fets2D4q import FETS2D4Q
from ibvpy.fets.fets2D.fets2D4q9u import FETS2D4Q9U
from ibvpy.fets.fets2D.fets2D4q8u import FETS2D4Q8U
from averaging import UniformDomainAveraging, LinearAF, QuarticAF
from numpy import array, cos, sin, pi,sqrt, deg2rad, arctan
from mathkit.mfn.mfn_line.mfn_line import MFnLineArray
from ibvpy.dots.avg_fn import AveragingFunction, LinearAF,QuarticAF
def app():
mp = MATS2DScalarDamage(E = 1.,
nu = 0.2,
epsilon_0 = 1.e-3,
epsilon_f = 5.e-3,
#stiffness = "algorithmic",
stress_state = "plane_strain",
stiffness = "secant",
strain_norm = Euclidean())
me = MATS2DElastic(E = 34e3,
nu = 0.,
stress_state = "plane_strain")
fets_eval = FETS2D4Q9U(mats_eval = me, ngp_r = 3, ngp_s = 3)
# Discretization
fe_domain = FEDomain()
fe_level1 = FERefinementGrid( domain = fe_domain,
fets_eval = fets_eval,
averaging = QuarticAF(radius = 0.25,
correction = True))
fe_grid = FEGrid( #coord_min = (-1.,-.5,0.),
coord_max = (2.,1.,0.),
shape = (20,10),
fets_eval = fets_eval,
level = fe_level1 )
mf = MFnLineArray( #xdata = arange(10),
ydata = array([0,1,1]) )
angle = 2.#[deg]
angle_r = deg2rad(angle)
s_angle = sin(angle_r/2.)
c_angle = cos(angle_r/2.)
l_diag = sqrt(5.)
d_angle = arctan(0.5)
s_diag = sin((angle_r+d_angle))
c_diag = cos((angle_r+d_angle))
ts = TS(sdomain = fe_domain,
# conversion to list (square brackets) is only necessary for slicing of
# single dofs, e.g "get_left_dofs()[0,1]" which elsewise retuns an integer only
bcond_list = [
# constraint for all left dofs in y-direction:
BCSlice(var='u', slice = fe_grid[0,0,0,0],dims=[0,1], value = 0.),
BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[1],
time_function = mf.get_value, value = 2*s_angle*2*c_angle),
BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[0],
time_function = mf.get_value, value = - 2*s_angle**2*2),
BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[0],
time_function = mf.get_value, value = - 1*s_angle*2*c_angle),
BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[1],
time_function = mf.get_value, value = - 1*s_angle**2*2),
BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [1],
time_function = mf.get_value, value = s_diag*l_diag - 1.),
BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [0],
time_function = mf.get_value, value = c_diag*l_diag - 2.)
],
rtrace_list = [
# RTraceGraph(name = 'Fi,right over u_right (iteration)' ,
# var_y = 'F_int', idx_y = right_dof,
# var_x = 'U_k', idx_x = right_dof,
# record_on = 'update'),
RTraceDomainListField(name = 'Deformation' ,
var = 'eps', idx = 0,
record_on = 'update'),
RTraceDomainListField(name = 'Displacement' ,
var = 'u', idx = 1,
record_on = 'update',
warp = True),
RTraceDomainListField(name = 'Damage' ,
var = 'omega', idx = 0,
record_on = 'update',
warp = True),
# RTraceDomainField(name = 'Stress' ,
# var = 'sig', idx = 0,
# record_on = 'update'),
# RTraceDomainField(name = 'N0' ,
# var = 'N_mtx', idx = 0,
# record_on = 'update')
]
)
# Add the time-loop control
#
tl = TLoop( tstepper = ts,
tolerance = 1.e-4,
tline = TLine( min = 0.0, step = 1., max = 2.0 ))
tl.eval()
# Put the whole stuff into the simulation-framework to map the
# individual pieces of definition into the user interface.
#
from ibvpy.plugins.ibvpy_app import IBVPyApp
ibvpy_app = IBVPyApp( ibv_resource = ts )
ibvpy_app.main()
if __name__ == '__main__':
app()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
2a09d365cb4047774eb081599078201fca564efa
|
4d8cfbfe6498d0808eefa8983b07940e006c49fb
|
/ges/forms.py
|
7da85805e7e5201851422ce41a3674ddf51edaf3
|
[] |
no_license
|
nikolzp/ges_google
|
4b7d18e4fa039a0d4b21e5d71b2a249df958ed2b
|
fe89f150a624411867877491f127d71eff92bfc9
|
refs/heads/master
| 2020-03-12T00:59:24.072648 | 2018-04-20T14:43:38 | 2018-04-20T14:43:38 | 130,363,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
from django import forms
class GesFilterForm(forms.Form):
max_power = forms.IntegerField(label='Мощность от МВт', required=False)
min_power = forms.IntegerField(label='Мощность до МВт', required=False)
max_area = forms.IntegerField(label='Объем от кв.км', required=False)
min_area = forms.IntegerField(label='Объем до кв.км', required=False)
|
[
"[email protected]"
] | |
92208027272a6e16363b60e6e399cc6ec08fcbb5
|
f3d757f421497e19f2de0d3be21b9ae381511577
|
/phoneconfirmation/urls.py
|
57e758f3e36f49419e6051dbeed37811f6ed3296
|
[
"MIT"
] |
permissive
|
pinax/pinax-phone-confirmation
|
526ba350a5bbaaa58f229fad224cf9db41f5bcbc
|
102d997db0a7cc00bd862a94987338c25ba24f98
|
refs/heads/master
| 2023-06-22T15:57:32.364754 | 2019-04-11T23:59:58 | 2019-04-11T23:59:58 | 22,494,944 | 12 | 3 |
MIT
| 2019-04-11T23:46:54 | 2014-08-01T04:14:23 |
Python
|
UTF-8
|
Python
| false | false | 369 |
py
|
from django.conf.urls import url, patterns
urlpatterns = patterns(
"phoneconfirmation.views",
url(r"^$", "phone_list", name="phone_list"),
url(r"^confirm_phone/(\w+)/$", "confirm_phone", name="phone_confirm"),
url(r"^action/$", "action", name="phone_action"),
url(r"^get-country-for-code/$", "get_country_for_code", name="get_country_for_code")
)
|
[
"[email protected]"
] | |
bf1b684d24bbc4cf5a7179c2bf9f39cda4883aac
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/delete_nextflow_job_request.py
|
83b25425a82c6befc3917dc50bb320e6b8812723
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 4,327 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteNextflowJobRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'eihealth_project_id': 'str',
'job_id': 'str'
}
attribute_map = {
'eihealth_project_id': 'eihealth_project_id',
'job_id': 'job_id'
}
def __init__(self, eihealth_project_id=None, job_id=None):
"""DeleteNextflowJobRequest
The model defined in huaweicloud sdk
:param eihealth_project_id: 医疗智能体平台项目ID,您可以在EIHealth平台单击所需的项目名称,进入项目设置页面查看。
:type eihealth_project_id: str
:param job_id: 作业id
:type job_id: str
"""
self._eihealth_project_id = None
self._job_id = None
self.discriminator = None
self.eihealth_project_id = eihealth_project_id
self.job_id = job_id
@property
def eihealth_project_id(self):
"""Gets the eihealth_project_id of this DeleteNextflowJobRequest.
医疗智能体平台项目ID,您可以在EIHealth平台单击所需的项目名称,进入项目设置页面查看。
:return: The eihealth_project_id of this DeleteNextflowJobRequest.
:rtype: str
"""
return self._eihealth_project_id
@eihealth_project_id.setter
def eihealth_project_id(self, eihealth_project_id):
"""Sets the eihealth_project_id of this DeleteNextflowJobRequest.
医疗智能体平台项目ID,您可以在EIHealth平台单击所需的项目名称,进入项目设置页面查看。
:param eihealth_project_id: The eihealth_project_id of this DeleteNextflowJobRequest.
:type eihealth_project_id: str
"""
self._eihealth_project_id = eihealth_project_id
@property
def job_id(self):
"""Gets the job_id of this DeleteNextflowJobRequest.
作业id
:return: The job_id of this DeleteNextflowJobRequest.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this DeleteNextflowJobRequest.
作业id
:param job_id: The job_id of this DeleteNextflowJobRequest.
:type job_id: str
"""
self._job_id = job_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteNextflowJobRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
4b7ffa1ba61b1b2c13a7c33cbe25688ed235e748
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.BAX/Mono_16/pdf_to_json_test_Latn.BAX_Mono_16.py
|
419f2ec7b49e7c247dfa03dc2764cd5ebdafafec
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.BAX/Mono_16/udhr_Latn.BAX_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"[email protected]"
] | |
7e5bdb95eceb8d543706dd352ce4101905da500f
|
95c71453ed6cc6f9b94f38a3c1655680618d71a4
|
/kickstart/DE/EC.py
|
477f7b6f874fc7fe1f0ee7f7bd160a909212de3b
|
[] |
no_license
|
ZX1209/gl-algorithm-practise
|
95f4d6627c1dbaf2b70be90149d897f003f9cb3a
|
dd0a1c92414e12d82053c3df981897e975063bb8
|
refs/heads/master
| 2020-05-16T14:56:34.568878 | 2019-12-27T07:37:11 | 2019-12-27T07:37:11 | 183,116,501 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,112 |
py
|
from collections import Counter
def isOdd(n):
return n % 2
def sortl(ls):
sums = Counter()
tmpl = []
for i in range(len(ls)):
sums[i] = sum(ls[i])
for i, j in sums.most_common():
tmpl.append(ls[i])
return tmpl
def rate(lhu,lla):
win = 0
total = 0
for hu in lhu:
for la in lla:
total += 1
if hu>la:
win+=1
return wi
def dfs()
# la win >=
# hu win >
def solve_EC(N, hu, la):
lla = [sum(la[i:i+N]) for i in range(N)]
win = 0
lla.sort()
hu.sort()
while
for i in range(3*N):
if hu[i] > la[i]:
win += 1
return win/(3*N)
def main():
T = int(input())
for t in range(T):
tmp = input().split()
tmp = list(map(int, tmp))
N, = tmp
tmp = input().split()
tmp = list(map(int, tmp))
hu = tmp
tmp = input().split()
tmp = list(map(int, tmp))
la = tmp
print('Case #' + str(t + 1) + ': ', end='')
print(solve_EC(N, hu, la))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
2180fbd40a9cda6cf6e7180218f7f525f2c351ce
|
664269ec1346b69b1af11d041d5352921ebef060
|
/sample-apps/rds/sample-app/src/pymysql/_compat.py
|
252789ec4460a3ee383f18f8af26e42ba82b666d
|
[
"Apache-2.0"
] |
permissive
|
awslabs/aws-servicebroker
|
0f288d4da0201a85e99f27bf7d95cc84d30d2f93
|
b28f42ad1e5861fd3009a10ad4bd511a384d3943
|
refs/heads/main
| 2023-08-30T01:09:05.351854 | 2023-07-06T18:09:22 | 2023-07-06T18:09:22 | 125,404,208 | 468 | 165 |
Apache-2.0
| 2023-08-30T14:07:12 | 2018-03-15T17:36:28 |
Python
|
UTF-8
|
Python
| false | false | 481 |
py
|
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
JYTHON = sys.platform.startswith('java')
IRONPYTHON = sys.platform == 'cli'
CPYTHON = not PYPY and not JYTHON and not IRONPYTHON
if PY2:
import __builtin__
range_type = xrange
text_type = unicode
long_type = long
str_type = basestring
unichr = __builtin__.unichr
else:
range_type = range
text_type = str
long_type = int
str_type = str
unichr = chr
|
[
"[email protected]"
] | |
0f0b988db044a90843787fcfa17790f226c36531
|
ce1f8877fa9ff084b75bceec4cc7ddf5b3153b07
|
/clif/testing/python/imported_methods_test.py
|
be75ee040777a7e9a8efe35837f737faf0fc1cda
|
[
"Apache-2.0"
] |
permissive
|
HenriChataing/clif
|
034aba392294ac30e40801815cf4d3172d3d44bd
|
307ac5b7957424706c598876d883936c245e2078
|
refs/heads/master
| 2021-01-23T16:25:19.543400 | 2017-09-01T22:18:03 | 2017-09-01T22:18:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.python.imported_methods."""
import unittest
from clif.testing.python import imported_methods
class InheritedConstructorsTest(unittest.TestCase):
def testInheritedConstructor(self):
d = imported_methods.Derived(12345)
self.assertEqual(d.GetA(), 12345)
self.assertEqual(d.GetAWithOffset(43210), 55555)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
c32fdb4787b51913dcb94e2128d2912fad182b06
|
3b871bdc672632e72bbdb72f98c914db660829b4
|
/Django_Project/Django_Project/asgi.py
|
5eb588cf3ef16a21bf99a45c2a9698189ff79917
|
[] |
no_license
|
JasbirCodeSpace/Django-Blog-Web-App
|
b1a58730a17c204fe4c8ad8ab4f3f1d47d5b30e1
|
6af67d03bbec997b972feacb2873efaa542becaa
|
refs/heads/master
| 2022-08-25T23:12:05.591494 | 2020-05-20T07:19:48 | 2020-05-20T07:19:48 | 264,860,882 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
"""
ASGI config for Blog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_Project.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
950cf5404ea2b75c9cadf94aa12dfbb274256e43
|
70ad3badf3fa6e2edf1889d8640f25a7ec0d9db1
|
/ros_catkin_ws/devel_isolated/rosparam/lib/python2.7/dist-packages/rosparam/__init__.py
|
979cdadf5761c2736f68558fa36dbd74e4175656
|
[] |
no_license
|
MathieuHwei/OldGaitMaven
|
758a937dfda2cf4f1aee266dbbf682ef34989199
|
873f7d9089c5d1c0772bd3447e2b0a31dac68b70
|
refs/heads/main
| 2023-06-17T18:40:06.230823 | 2021-07-19T23:08:20 | 2021-07-19T23:08:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,030 |
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/pi/ros_catkin_ws/src/ros_comm/rosparam/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
[
"[email protected]"
] | |
9b51264685632fddec2373e3a341f25d8d1d3fc9
|
e00fe1e065b448f6f8c0472ed2b8a39991fa7b1b
|
/Fuzzy_clustering/version2/template/project_run.py
|
4188a8c1f9dfdd2d18eda05f8e884d4dcc2f62af
|
[
"Apache-2.0"
] |
permissive
|
joesider9/forecasting_library
|
1a4ded5b09fc603f91fa1c075e79fc2ed06c08a8
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
refs/heads/master
| 2023-03-29T12:18:22.261488 | 2021-04-01T08:57:08 | 2021-04-01T08:57:08 | 319,906,316 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,106 |
py
|
from Fuzzy_clustering.version2.project_managers.project_eval_manager import ProjectsEvalManager
from Fuzzy_clustering.version2.project_managers.projects_data_manager import ProjectsDataManager
from Fuzzy_clustering.version2.project_managers.projects_train_manager import ProjectsTrainManager
from Fuzzy_clustering.version2.template.constants import *
from Fuzzy_clustering.version2.template.util_database_timos import write_database
def prepare_data():
static_data = write_database()
project_data_manager = ProjectsDataManager(static_data, is_test=False)
nwp_response = project_data_manager.nwp_extractor()
if nwp_response == DONE:
data_response = project_data_manager.create_datasets()
else:
raise RuntimeError('Something was going wrong with nwp extractor')
if data_response == DONE:
project_data_manager.create_projects_relations()
else:
raise RuntimeError('Something was going wrong with data manager')
if hasattr(project_data_manager, 'data_eval'):
project_data_manager.is_test = True
nwp_response = project_data_manager.nwp_extractor()
if nwp_response == DONE:
nwp_response = project_data_manager.create_datasets()
if nwp_response != DONE:
raise RuntimeError('Something was going wrong with on evaluation dataset creator')
else:
raise RuntimeError('Something was going wrong with nwp extractor on evaluation')
print("Data is prepared, training can start")
def train_project():
static_data = write_database()
project_train_manager = ProjectsTrainManager(static_data)
project_train_manager.fit()
def eval_project():
static_data = write_database()
project_eval_manager = ProjectsEvalManager(static_data)
project_eval_manager.evaluate()
def backup_project():
static_data = write_database()
project_backup_manager = ProjectsTrainManager(static_data)
project_backup_manager.clear_backup_projects()
if __name__ == '__main__':
prepare_data()
train_project()
eval_project()
backup_project()
|
[
"[email protected]"
] | |
a1b9c909d2e60fb563ed2c58c3bf28e228f2e771
|
751691a21ed1d8c69c35f3cd9b9fd395dc5c1aa8
|
/{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py
|
10b0164eb8f5f4fa01443e26d36608d2830e48cc
|
[] |
no_license
|
mtianyan/cookiecutter-drf-mtianyan
|
5899847f46e853a0ec5be9bcbf9e7294ce2b70cd
|
b1298f6c5b20149db4589ce127b2e6e0392552b6
|
refs/heads/master
| 2022-12-28T18:26:57.969693 | 2020-10-10T08:29:08 | 2020-10-10T08:29:08 | 275,175,974 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,619 |
py
|
from rest_framework import viewsets, status
from rest_framework.response import Response
from utils import change_key
class CustomViewSet(viewsets.ModelViewSet):
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
res = serializer.data
if "status" in res.keys():
res["status"] = str(res["status"])
return Response({
"code": 200,
"data": res
})
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)
def put(self, request, *args, **kwargs):
change_key(request)
update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]
self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)
return Response({'code': 200, 'msg': '修改成功'})
# def destroy(self, request, *args, **kwargs):
# instance = self.get_object()
# self.perform_destroy(instance)
# return Response({'code': 200}, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
ids = kwargs["pk"].split(",")
self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()
return Response({
"code": 200
})
|
[
"[email protected]"
] | |
7126110b6be5e67ec95d040579d17ce5b4278f11
|
0b51bc6c7a98d07880955a31e147c0c15b1e3151
|
/tonkho/models/stock_quant.py
|
da383abe5fd406bbfc64072e6fd0731db111501c
|
[] |
no_license
|
tu95ctv/duan_mi2
|
72e8bcbad73dfea1b57b69dbfd1c8d48ecebb975
|
f1728d99e27fcc18684d50f5719f3dcedcffd755
|
refs/heads/master
| 2020-04-28T21:30:25.017845 | 2019-07-07T13:25:43 | 2019-07-07T13:25:43 | 175,584,277 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,283 |
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
from odoo.tools.translate import _
from odoo.tools.float_utils import float_compare
from odoo.addons.tutool.mytools import name_compute
from odoo.addons.tonkho.tonkho_tool import write_to_current_path
from odoo.addons.tutool.mytools import pn_replace
from lxml import etree
class Quant(models.Model):
""" Quants are the smallest unit of stock physical instances """
_inherit = "stock.quant"
pn = fields.Char(related='product_id.pn', store=True,string="Part number")
categ_id = fields.Many2one('product.category', related='product_id.categ_id',store=True,string=u'Nhóm')
thiet_bi_id = fields.Many2one('tonkho.thietbi',related='product_id.thiet_bi_id', string = u'Thiết bị',store=True)
brand_id = fields.Many2one('tonkho.brand',related='product_id.brand_id',string=u'Hãng sản xuất',store=True)
tracking = fields.Selection([
('serial', 'By Unique Serial Number'),
('none', 'No Tracking')], string=u"Có SN hay không", related='product_id.tracking',store=True)
stock_location_id_selection = fields.Selection('get_stock_for_selection_field_',store=False)
tinh_trang = fields.Selection([('tot',u'Tốt'),('hong',u'Hỏng')],default='tot',related='lot_id.tinh_trang',store=True,string=u'Tình trạng')
ghi_chu = fields.Text(string=u'Ghi chú',related='lot_id.ghi_chu')
stt = fields.Integer()
inventory_line_id = fields.Many2one('stock.inventory.line')
# ml_ids = fields.One2many('stock.move.line','lot_id',compute='ml_ids_',string=u'Các dòng điều chỉnh')
ml_ids = fields.Many2many('stock.move.line','stock_quant_stock_move_line_rel','quant_id','move_line_id',compute='ml_ids_',
string=u'Các dòng điều chỉnh')
# @api.depends('is_done_ml_filter','is_your_department_filter','id_show')
def ml_ids_(self):
for r in self:
# active_id = r.id_show
domain = [('lot_id','=',r.lot_id.id),('product_id','=',r.product_id.id),'|',('location_id','=',r.location_id.id),('location_dest_id','=',r.location_id.id)]# r.id = new object nên không được
# if r.is_done_ml_filter:
# domain.append(('state','=','done'))
# if r.is_your_department_filter:
# your_department_id = self.env.user.department_id.id
# # department_domain = ['|',('location_id.department_id','=',your_department_id),('location_dest_id.department_id','=',your_department_id)]
# domain.extend(department_domain)
r.ml_ids = self.env['stock.move.line'].search(domain,order='id desc')
# is_done_ml_filter = fields.Boolean(default= True,store=False, string=u'Chỉ lọc dòng hoàn thành')
# is_your_department_filter = fields.Boolean(default= True,store=False,string =u'Chỉ lọc kho đơn vị của bạn')
# id_show = fields.Integer(compute='id_show_')
# def id_show_(self):
# for r in self:
# r.id_show = r.id
@api.model
def create(self, values):
if 'update_inventory' in self._context:
values.update(self._context['update_inventory'])
res = super(Quant, self).create(values)
return res
def get_stock_for_selection_field_(self):
locs = self.env['stock.location'].search([('is_kho_cha','=',True)])
rs = list(map(lambda i:(i.name,i.name),locs))
return rs
# @api.constrains('lot_id')
# def check_product_id(self):
# not_allow_check_lot_id_in_different_location =self.env['ir.config_parameter'].sudo().get_param('tonkho.not_allow_check_lot_id_in_different_location' )
# if not_allow_check_lot_id_in_different_location ==False:
# if self.lot_id:
# rs = self.env['stock.quant'].search([('lot_id','=',self.lot_id.id),('quantity','>',0)])
# if len(rs)>1:
# raise UserError(u'Không được có quants nội bộ chung lot_id và quantity > 0 product:%s-sn: %s'%(self.product_id.name,self.lot_id.name))
@api.constrains('location_id','quantity')
def not_allow_negative_qty(self):
for r in self:
if not r.location_id.cho_phep_am:
if r.quantity < 0:
raise UserError ( u' Kho:%s, không cho phép tạo âm- sản phẩm:%s-Serial number:%s'%(r.location_id.name,r.product_id.name,r.lot_id.name))
# GHI ĐÈ CÁI XEM DỊCH CHUYỂN KHO, KHÔNG CẦN LỌC VỊ TRÍ KHO
def action_view_stock_moves(self):
self.ensure_one()
action = self.env.ref('stock.stock_move_line_action').read()[0]
action['domain'] = [
('product_id', '=', self.product_id.id),
# '|', ('location_id', '=', self.location_id.id),
# ('location_dest_id', '=', self.location_id.id),
('lot_id', '=', self.lot_id.id),
('package_id', '=', self.package_id.id)]
return action
def name_get(self):
res = []
for r in self:
adict=[
('product_id',{'pr':None,'func':lambda r: r.name + (' [PN:%s]'%r.pn if r.pn else '')}),
# ('product_id',{'pr':None}),
('lot_id',{'pr':None,'func':lambda r: r.name,'skip_if_False':False}),
('quantity',{'pr':None,'func':lambda val:'%s'%val,'skip_if_False':False}),
]
name = name_compute(r,adict,join_char = u' | ')
res.append((r.id,name))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
context = self._context or {}
if context.get('kho_da_chon') !=None:
choosed_list = context.get('kho_da_chon') [0][2]
args +=[('id','not in',choosed_list)]
if name:
pn_replace_str = pn_replace(name)
else:
pn_replace_str = ''
recs = self.search(['|','|',('product_id', operator, name),('product_id.pn_replace', operator, pn_replace_str),('lot_id.name', operator, name)] + args, limit=limit)
return recs.name_get()
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
context = self._context or {}
if context.get('kho_da_chon') !=None:
choosed_list = context.get('kho_da_chon') [0][2]
args +=[('id','not in',choosed_list)]
return super(Quant, self).search(args, offset, limit, order, count=count)
@api.constrains('quantity')
def check_quantity(self):
for quant in self:
if float_compare(quant.quantity, 1, precision_rounding=quant.product_uom_id.rounding) > 0 and quant.lot_id and quant.product_id.tracking == 'serial':
raise ValidationError(_('A serial number should only be linked to a single product. %s,%s,%s'%(quant.quantity,quant.product_id.name,quant.lot_id.name)))
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(Quant, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type =='search':
# write_to_current_path(u'%s'%res['arch'])
# print ("res['arch']",res['arch'])
doc = etree.fromstring(res['arch'])
node = doc.xpath("//filter[@name='locationgroup']")[0]
node.addnext(etree.Element('separator', {}))
node.addnext(etree.Element('filter', {'string':'Lọc theo kho của trạm %s'%self.env.user.department_id.name,'name': 'loc_theo_tram_137', 'domain': "[('location_id.department_id','=',%s)]"%self.env.user.department_id.id}))
res['arch'] = etree.tostring(doc, encoding='unicode')
return res
|
[
"[email protected]"
] | |
44a6e5f1f5fda5f40b242e469cc4314b106c8306
|
e6b969b7c50de5ae61c4b76ec31a982d16523e46
|
/sym.py
|
3f88fe9fcc929640105de765ca5654c69c9dd65f
|
[] |
no_license
|
dlovemore/parle
|
7d52dc76716f3f8a5f085aa26277b2c52b98b098
|
e949c743b2760079eb3c3eb67198e69562521d20
|
refs/heads/master
| 2021-01-03T07:04:09.060552 | 2020-10-20T13:01:59 | 2020-10-20T13:01:59 | 239,972,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,418 |
py
|
class Base:
def __init__(self, *args):
self._args = args
@property
def args(self): return self._args
@args.setter
def args(self, value): self._args = value
def __repr__(self):
rargs = repr(list(self.args))[1:-1]
return f'{type(self).__name__}({rargs})'
class E(Base):
@property
def op(self):
return self.args[0]
@property
def exprs(self):
return self.args[1:]
@property
def lhs(self):
return self.args[1]
@property
def rhs(self):
return self.args[2]
@property
def a1(self):
return self.args[1]
@property
def a2(self):
return self.args[2]
def __add__(self, rhs):
return E('+',self,rhs)
def __contains__(self, lhs):
return E(' in ',lhs,self)
def __truediv__(self, rhs):
return E('/',self,rhs)
def __floordiv__(self, rhs):
return E('//',self,rhs)
def __and__(self, rhs):
return E('&',self,rhs)
def __xor__(self, rhs):
return E('^',self,rhs)
def __invert__(self):
return E('~_',self)
def __or__(self, rhs):
return E('|',self,rhs)
def __pow__(self, rhs):
return E('**',self,rhs)
def __getitem__(self, k):
return E('[]',self, k)
def __lshift__(self, rhs):
return E('<<',self, rhs)
def __mod__(self, rhs):
return E('%',self, rhs)
def __mul__(self, rhs):
return E('*',self, rhs)
def __matmul__(self, rhs):
return E('@',self, rhs)
def __neg__(self):
return E('-_',self)
def __pos__(self):
return E('+_',self)
def __rshift__(self, rhs):
return E('>>',self, rhs)
def __sub__(self, rhs):
return E('-',self, rhs)
def __lt__(self, rhs):
return E('<',self, rhs)
def __le__(self, rhs):
return E('<=',self, rhs)
def __eq__(self, rhs):
return E('==',self, rhs)
def __ne__(self, rhs):
return E('!=',self, rhs)
def __ge__(self, rhs):
return E('>=',self, rhs)
def __gt__(self, rhs):
return E('>',self, rhs)
def __call__(self, *args):
return E('_()',self, *args)
def dolet(k,v): return E('=',k,v)
class LetClause(Base):
def __getitem__(self, k):
if isinstance(k,tuple):
k=E(',',*k)
return E('=',E('args',*self.args), k)
class Let:
def __setitem__(self,k,v):
stmts += dolet(k,v)
def __call__(self,*args):
return LetClause(*args)
let=Let()
class Stmt:
def __init__(self,k):
self.op = k
def __getitem__(self, k):
if isinstance(k,tuple):
k=E(',',*k)
return E(self.op, k)
# Use like:
# let(x)[x+1]
# or [let(x)[4], let(Y)[X+1]]
class Env:
def __init__(self, globals, op='var'):
self.globals=globals
self.vars=dict()
self.op=op
def __call__(self, name):
if name not in self.vars:
v=E(self.op, name)
self.globals[name]=v
self.vars[name]=v
return self.vars[name]
def __getattr__(self, name):
return self(name)
var=Env(globals())
v=var
arg=var
class OnClause:
def __init__(self, e):
self.e=e
def __getitem__(self, rhs):
if isinstance(rhs, slice):
assert(rhs.step is None)
return E('?:', self.e, rhs.start, rhs.stop)
else:
return E('?', self.e, rhs)
class On:
def __call__(self, e):
return OnClause(e)
on=On()
IF=on
class LambdaClause:
def __init__(self, *args):
self.args=args
def __getitem__(self, rhs):
return E('λ',self.args,rhs)
class LambdaDefiner:
def __call__(self, *args):
return LambdaClause(args)
λ=LambdaDefiner()
class Ref:
def __init__(self, r, uid):
self.refmaker = r
self.uid = uid
def __matmul__(self, rhs):
if self in self.refmaker.rees:
raise RuntimeError
self.refmaker.rees[self]=rhs
return rhs
def __repr__(self):
return f'{self.uid}@R'
class RefMaker:
def __init__(self):
self.refs = dict() # uid->ref
self.rees = dict() # ref->referee
def __rmatmul__(self, uid):
"Handles uid@self"
if uid not in self.refs:
self.refs[uid] = Ref(self,uid)
return self.refs[uid]
def save(x):
seen=set()
many=set()
def mr(x):
if id(x) in seen:
many.add(id(x))
else:
seen.add(id(x))
if isinstance(x, Base):
for a in x.args:
mr(a)
mr(x)
uids=dict() # ref id->ids
uid=1
def pr(x):
nonlocal uid
s=''
if id(x) in many:
if id(x) in uids:
return f'{uids[id(x)]}@R'
else:
uids[id(x)]=uid
s+=f'{uid}@R@'
uid+=1
if isinstance(x, Base):
first=True
s+=f'{type(x).__name__}('
for arg in x.args:
if first: first=False
else: s+=','
s+=pr(arg)
s+=')'
else:
s+=repr(x)
return s
return pr(x)
def load(s):
    """Rebuild an expression tree from a string produced by :func:`save`.

    SECURITY: this calls ``eval`` on *s* -- only use it on trusted,
    locally produced strings, never on external input.
    """
    global R
    # Fresh registry: the evaluated string uses ``uid@R`` / ``uid@R@expr``
    # matmul operators, which intern Refs into R and record referees in R.rees.
    R=RefMaker()
    b=eval(s)
    seen=set()
    def resolve(x):
        # Replace every Ref with its referee; ``seen`` guards against
        # re-walking shared (now cyclic-looking) subtrees.
        if id(x) not in seen:
            seen.add(id(x))
            if isinstance(x, Base):
                x.args=[resolve(a) for a in x.args]
        if isinstance(x, Ref):
            return R.rees[x]
        else:
            return x
    resolve(b)
    # NOTE(review): the top-level result of resolve() is discarded, so a bare
    # Ref at the root would not be substituted -- confirm roots are always
    # Base nodes.
    return b
# >>> from sym import *
# >>> X=var.X
# >>> print(v.Y)
# E('var', 'Y')
# >>> a=[1,2]
# >>> a[0]=3
# >>> a[0]+=3
# >>> E('a','var')[3]
# E('[]', E('a', 'var'), 3)
# >>> a=v.a
# >>> a[0]
# E('[]', E('var', 'a'), 0)
# >>> v.X<v.Y
# E('<', E('var', 'X'), E('var', 'Y'))
# >>> v.X[v.X+1,]
# E('[]', E('var', 'X'), (E('+', E('var', 'X'), 1),))
# >>>
# >>> globals()['ai']=12
# >>> ai
# 12
# >>>
# >>> on(X)[3:4]
# E('?:', E('var', 'X'), 3, 4)
# >>> on(X)[3]
# E('?', E('var', 'X'), 3)
# >>> E(E('X','var'),'?',3)
# E(E('X', 'var'), '?', 3)
# >>> var.A
# E('var', 'A')
# >>> A
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# NameError: name 'A' is not defined
# >>> var=Env(globals())
# >>> var.A
# E('var', 'A')
# >>> A
# E('var', 'A')
# >>> E
# <class 'sym.E'>
# >>>
# >>> [getattr(var,x) for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
# [E('var', 'A'), E('var', 'B'), E('var', 'C'), E('var', 'D'), E('var', 'E'), E('var', 'F'), E('var', 'G'), E('var', 'H'), E('var', 'I'), E('var', 'J'), E('var', 'K'), E('var', 'L'), E('var', 'M'), E('var', 'N'), E('var', 'O'), E('var', 'P'), E('var', 'Q'), E('var', 'R'), E('var', 'S'), E('var', 'T'), E('var', 'U'), E('var', 'V'), E('var', 'W'), E('var', 'X'), E('var', 'Y'), E('var', 'Z')]
# >>> A
# E('var', 'A')
# >>> E
# E('var', 'E')
# >>> import fun
# >>> fun.E
# <class 'parle.sym.E'>
# >>> var.E
# E('var', 'E')
# >>> fun.E
# <class 'parle.sym.E'>
# >>> λ(X)[X+1]
# E('λ', ((E('var', 'X'),),), E('+', E('var', 'X'), 1))
# >>>
# >>>
# >>> let(X)
# LetClause(E('var', 'X'))
# >>> let(X)[X+1]
# E('=', E('args', E('var', 'X')), E('+', E('var', 'X'), 1))
# >>> LET=Stmt('let')
# >>>
# >>> LET(X)
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# TypeError: 'Stmt' object is not callable
# >>> LET[X]
# E('let', E('var', 'X'))
# >>>
|
[
"[email protected]"
] | |
f64233795111df760e19371a35a584413081cff7
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/class_def_attr-big-407.py
|
d189ce0fed43c4a777ecf1f02981982293253209
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,297 |
py
|
class A(object):
x:int = 1
class A2(object):
x:int = 1
x2:int = 1
class A3(object):
x:int = 1
x2:int = 1
x3:int = 1
class A4(object):
x:int = 1
x2:int = 1
x3:int = 1
x4:int = 1
class A5(object):
x:int = 1
x2:int = 1
x3:int = 1
x4:int = 1
x5:int = 1
class B(A):
def __init__(self: "B"):
pass
class B2(A):
def __init__(self: "B2"):
pass
class B3(A):
def __init__(self: "B3"):
pass
class B4(A):
def __init__(self: "B4"):
pass
class B5(A):
def __init__(self: "B5"):
pass
class C(B):
z:bool = True
class C2(B):
z:bool = True
z2:bool = True
class C3(B):
z:bool = True
z2:bool = True
z3:bool = True
class C4(B):
z:bool = True
z2:bool = True
z3:bool = True
z4:bool = True
class C5(B):
z:bool = True
z2:bool = True
z3:bool = True
z4:bool = True
z5:bool = True
a:A = None
a2:A = None
a3:A = $Literal
a4:A = None
a5:A = None
b:B = None
b2:B = None
b3:B = None
b4:B = None
b5:B = None
c:C = None
c2:C = None
c3:C = None
c4:C = None
c5:C = None
a = A()
a2 = A()
a3 = A()
a4 = A()
a5 = A()
b = B()
b2 = B()
b3 = B()
b4 = B()
b5 = B()
c = C()
c2 = C()
c3 = C()
c4 = C()
c5 = C()
a.x = 1
b.x = a.x
c.z = a.x == b.x
|
[
"[email protected]"
] | |
21f188524361b8fa84956085533990c2bc3dbde9
|
dcc25b784213b17015d2080a7623c772d474dc22
|
/reproduce/AlphaFold2-Chinese/tests/st/mindelec/networks/test_frequency_domain_maxwell/test_frequency_domain_maxwell.py
|
65c3d50f3c3832682e1414cb4e3413c5f6f49489
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
mindspore-ai/community
|
930c9d9fdbead852e3597d522a72fe5b66bfc005
|
c72ce898482419117550ad16d93b38298f4306a1
|
refs/heads/master
| 2023-07-19T19:43:20.785198 | 2023-07-17T06:51:22 | 2023-07-17T06:51:22 | 250,693,100 | 193 | 10 |
Apache-2.0
| 2022-10-29T10:01:40 | 2020-03-28T02:00:02 |
Python
|
UTF-8
|
Python
| false | false | 5,395 |
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
train
"""
import os
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import context, ms_function
from mindspore.common import set_seed
from mindspore.train.callback import LossMonitor
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindelec.solver import Solver, Problem
from mindelec.geometry import Rectangle, create_config_from_edict
from mindelec.common import L2
from mindelec.data import Dataset
from mindelec.operators import SecondOrderGrad as Hessian
from mindelec.loss import Constraints
from src.config import rectangle_sampling_config, helmholtz_2d_config
from src.model import FFNN
from src.dataset import test_data_prepare
from src.callback import PredictCallback, TimeMonitor
# Fix both MindSpore and NumPy RNG seeds so the test is deterministic.
set_seed(0)
np.random.seed(0)
print("pid:", os.getpid())
# Static-graph execution on Ascend hardware; graph files are not dumped.
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
# define problem
class Helmholtz2D(Problem):
    """2D Helmholtz equation ``u_xx + u_yy + k^2 * u = 0`` as a PINNs problem.

    *domain_name* / *bc_name* are the dataset column keys under which the
    sampled interior / boundary points arrive in ``**kwargs``.
    """
    def __init__(self, domain_name, bc_name, net, wavenumber=2):
        super(Helmholtz2D, self).__init__()
        self.domain_name = domain_name
        self.bc_name = bc_name
        self.type = "Equation"
        self.wave_number = wavenumber
        # Second derivatives of network output 0 w.r.t. input 0 (x) and 1 (y).
        self.grad_xx = Hessian(net, input_idx1=0, input_idx2=0, output_idx=0)
        self.grad_yy = Hessian(net, input_idx1=1, input_idx2=1, output_idx=0)
        self.reshape = ops.Reshape()
    @ms_function
    def governing_equation(self, *output, **kwargs):
        """PDE residual ``u_xx + u_yy + k^2 * u`` on interior points."""
        u = output[0]
        # NOTE(review): x and y below are computed but never used in the
        # returned residual -- apparently dead code; confirm graph tracing
        # does not depend on them before removing.
        x = kwargs[self.domain_name][:, 0]
        y = kwargs[self.domain_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        u_xx = self.grad_xx(kwargs[self.domain_name])
        u_yy = self.grad_yy(kwargs[self.domain_name])
        return u_xx + u_yy + self.wave_number**2 * u
    @ms_function
    def boundary_condition(self, *output, **kwargs):
        """Weighted Dirichlet residual ``100 * (u - sin(k*x))`` on the boundary."""
        u = output[0]
        x = kwargs[self.bc_name][:, 0]
        # NOTE(review): y is extracted and reshaped but unused -- confirm.
        y = kwargs[self.bc_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        test_label = ops.sin(self.wave_number * x)
        # The factor 100 up-weights the BC loss relative to the PDE residual.
        return 100 * (u - test_label)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_frequency_domain_maxwell():
    """End-to-end PINNs training for the 2D Helmholtz problem.

    Builds the rectangle geometry and dataset, trains the FFNN under the
    PDE + BC constraints, then asserts the final l2 error and per-step
    wall-clock time stay within CI budgets.
    """
    net = FFNN(input_dim=2, output_dim=1, hidden_layer=64)
    # define geometry: a rectangle sampled for both interior and boundary
    geom_name = "rectangle"
    rect_space = Rectangle(geom_name,
                           coord_min=helmholtz_2d_config["coord_min"],
                           coord_max=helmholtz_2d_config["coord_max"],
                           sampling_config=create_config_from_edict(rectangle_sampling_config))
    geom_dict = {rect_space: ["domain", "BC"]}
    # create dataset for train and test
    train_dataset = Dataset(geom_dict)
    train_data = train_dataset.create_dataset(batch_size=helmholtz_2d_config.get("batch_size", 128),
                                              shuffle=True, drop_remainder=False)
    test_input, test_label = test_data_prepare(helmholtz_2d_config)
    # define problem and constraints; key names follow "<geom>_<part>_points"
    train_prob_dict = {geom_name: Helmholtz2D(domain_name=geom_name + "_domain_points",
                                              bc_name=geom_name + "_BC_points",
                                              net=net,
                                              wavenumber=helmholtz_2d_config.get("wavenumber", 2)),
                       }
    train_constraints = Constraints(train_dataset, train_prob_dict)
    # optimizer
    optim = nn.Adam(net.trainable_params(), learning_rate=helmholtz_2d_config.get("lr", 1e-4))
    # solver: O2 mixed precision with dynamic loss scaling (Ascend)
    solver = Solver(net,
                    optimizer=optim,
                    mode="PINNs",
                    train_constraints=train_constraints,
                    test_constraints=None,
                    amp_level="O2",
                    metrics={'l2': L2(), 'distance': nn.MAE()},
                    loss_scale_manager=DynamicLossScaleManager()
                    )
    # train, tracking per-step time and the periodic test-set l2 error
    time_cb = TimeMonitor()
    loss_cb = PredictCallback(model=net, predict_interval=10, input_data=test_input, label=test_label)
    solver.train(epoch=helmholtz_2d_config.get("epochs", 10),
                 train_dataset=train_data,
                 callbacks=[time_cb, LossMonitor(), loss_cb])
    per_step_time = time_cb.get_step_time()
    l2_error = loss_cb.get_l2_error()
    print(f'l2 error: {l2_error:.10f}')
    print(f'per step time: {per_step_time:.10f}')
    # accuracy and performance budgets for CI
    assert l2_error <= 0.05
    assert per_step_time <= 10.0
|
[
"[email protected]"
] | |
a87bf5f31c6025305ca0fd7c72b461abad7671a5
|
b2075a92c3854c921a95673a3c5ebb424ab08112
|
/python/postprocessing/framework/postprocessor.py
|
9eb1775d8dbd0f52159a9b31d8202b7f33272466
|
[] |
no_license
|
vhbb/nanoAOD-tools
|
cd2a6305991369948bb9577c5da3c7e4db275c52
|
14bce3dca68288e65b2daefce755d65914a3765d
|
refs/heads/master
| 2021-09-04T21:44:29.892241 | 2018-01-22T12:50:50 | 2018-01-22T12:50:50 | 106,291,673 | 1 | 1 | null | 2018-01-22T12:50:51 | 2017-10-09T14:06:47 |
Python
|
UTF-8
|
Python
| false | false | 4,843 |
py
|
#!/usr/bin/env python
import os
import time
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.branchselection import BranchSelection
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import InputTree
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import eventLoop
from PhysicsTools.NanoAODTools.postprocessing.framework.output import FriendOutput, FullOutput
from PhysicsTools.NanoAODTools.postprocessing.framework.preskimming import preSkim
from PhysicsTools.NanoAODTools.postprocessing.framework.jobreport import JobReport
class PostProcessor :
    """Drives the NanoAOD post-processing chain (Python 2 code).

    For each input file: applies an optional pre-selection cut and JSON lumi
    mask, then either fast-clones the selected events (no modules configured)
    or runs the analysis modules event by event, writing a skimmed tree or a
    friend tree next to the originals. Can optionally hadd the outputs and
    write a framework job report (FJR).
    """
    def __init__(self,outputDir,inputFiles,cut=None,branchsel=None,modules=[],compression="LZMA:9",friend=False,postfix=None,
            jsonInput=None,noOut=False,justcount=False,provenance=False,haddFileName=None,fwkJobReport=False):
        # NOTE(review): ``modules=[]`` is a mutable default argument; it is
        # harmless here only because the list is never mutated -- keep it so.
        self.outputDir=outputDir
        self.inputFiles=inputFiles
        self.cut=cut
        self.modules=modules
        self.compression=compression
        self.postfix=postfix
        self.json=jsonInput
        self.noOut=noOut
        self.friend=friend
        self.justcount=justcount
        self.provenance=provenance
        self.jobReport = JobReport() if fwkJobReport else None
        self.haddFileName=haddFileName
        # A FJR implies a final hadd; default the merged file name if unset.
        if self.jobReport and not self.haddFileName :
            print "Because you requested a FJR we assume you want the final hadd. No name specified for the output file, will use tree.root"
            self.haddFileName="tree.root"
        self.branchsel = BranchSelection(branchsel) if branchsel else None
    def run(self) :
        """Process every input file; see the class docstring for the flow."""
        if not self.noOut:
            # Output suffix: explicit postfix wins, else _Friend / _Skim.
            outpostfix = self.postfix if self.postfix != None else ("_Friend" if self.friend else "_Skim")
            if self.compression != "none":
                ROOT.gInterpreter.ProcessLine("#include <Compression.h>")
                (algo, level) = self.compression.split(":")
                compressionLevel = int(level)
                if algo == "LZMA": compressionAlgo  = ROOT.ROOT.kLZMA
                elif algo == "ZLIB": compressionAlgo  = ROOT.ROOT.kZLIB
                else: raise RuntimeError("Unsupported compression %s" % algo)
            else:
                compressionLevel = 0
            print "Will write selected trees to "+self.outputDir
            if not self.justcount:
                if not os.path.exists(self.outputDir):
                    os.system("mkdir -p "+self.outputDir)
        if self.noOut:
            if len(self.modules) == 0:
                raise RuntimeError("Running with --noout and no modules does nothing!")
        for m in self.modules: m.beginJob()
        # With no modules we can fast-clone whole trees instead of looping.
        fullClone = (len(self.modules) == 0)
        outFileNames=[]
        t0 = time.clock()
        totEntriesRead=0
        for fname in self.inputFiles:
            # open input file
            inFile = ROOT.TFile.Open(fname)
            # get input tree
            inTree = inFile.Get("Events")
            totEntriesRead+=inTree.GetEntries()
            # pre-skimming: entry list from the cut plus a JSON lumi filter
            elist,jsonFilter = preSkim(inTree, self.json, self.cut)
            if self.justcount:
                print 'Would select %d entries from %s'%(elist.GetN() if elist else inTree.GetEntries(), fname)
                continue
            else:
                print 'Pre-select %d entries out of %s '%(elist.GetN() if elist else inTree.GetEntries(),inTree.GetEntries())
            if fullClone:
                # no need of a reader (no event loop), but set up the elist if available
                if elist: inTree.SetEntryList(elist)
            else:
                # initialize reader
                inTree = InputTree(inTree, elist)
            # prepare output file
            outFileName = os.path.join(self.outputDir, os.path.basename(fname).replace(".root",outpostfix+".root"))
            outFile = ROOT.TFile.Open(outFileName, "RECREATE", "", compressionLevel)
            outFileNames.append(outFileName)
            if compressionLevel: outFile.SetCompressionAlgorithm(compressionAlgo)
            # prepare output tree
            if self.friend:
                outTree = FriendOutput(inFile, inTree, outFile)
            else:
                outTree = FullOutput(inFile, inTree, outFile, branchSelection = self.branchsel, fullClone = fullClone, jsonFilter = jsonFilter,provenance=self.provenance)
            # process events, if needed
            if not fullClone:
                (nall, npass, timeLoop) = eventLoop(self.modules, inFile, outFile, inTree, outTree)
                print 'Processed %d preselected entries from %s (%s entries). Finally selected %d entries' % (nall, fname, inTree.GetEntries(), npass)
            else:
                print 'Selected %d entries from %s' % (outTree.tree().GetEntries(), fname)
            # now write the output
            outTree.write()
            outFile.Close()
            print "Done %s" % outFileName
            if self.jobReport:
                # NOTE(review): ``nall`` is only assigned on the not-fullClone
                # branch above; with a FJR and zero modules this line raises
                # NameError -- confirm that combination cannot occur.
                self.jobReport.addInputFile(fname,nall)
        for m in self.modules: m.endJob()
        print totEntriesRead/(time.clock()-t0), "Hz"
        if self.haddFileName :
            # NOTE(review): shell command built by string concatenation; file
            # names with spaces or shell metacharacters would break it.
            os.system("./haddnano.py %s %s" %(self.haddFileName," ".join(outFileNames))) #FIXME: remove "./" once haddnano.py is distributed with cms releases
        if self.jobReport :
            self.jobReport.addOutputFile(self.haddFileName)
            self.jobReport.save()
|
[
"[email protected]"
] | |
c74d3d817ada2bcf2794d7cffebfb2b3ccbf0e02
|
23a3c76882589d302b614da5f4be0fc626b4f3cd
|
/python_modules/dagster/dagster/api/snapshot_trigger.py
|
d9414b7c2318bcc7dc7ca624569ba3ba47f8ef8b
|
[
"Apache-2.0"
] |
permissive
|
DavidKatz-il/dagster
|
3641d04d387cdbe5535ae4f9726ce7dc1981a8c3
|
7c6d16eb8b3610a21020ecb479101db622d1535f
|
refs/heads/master
| 2022-12-20T13:08:36.462058 | 2020-09-14T18:12:12 | 2020-09-14T22:43:26 | 264,703,873 | 0 | 0 |
Apache-2.0
| 2020-06-16T09:49:00 | 2020-05-17T15:56:57 |
Python
|
UTF-8
|
Python
| false | false | 2,276 |
py
|
from dagster import check
from dagster.core.host_representation.external_data import (
ExternalExecutionParamsData,
ExternalExecutionParamsErrorData,
)
from dagster.core.host_representation.handle import RepositoryHandle
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.grpc.types import ExternalTriggeredExecutionArgs
from .utils import execute_unary_api_cli_command
def sync_get_external_trigger_execution_params(instance, repository_handle, trigger_name):
    """Fetch trigger execution params by shelling out to the repository CLI."""
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(trigger_name, "trigger_name")

    origin = repository_handle.get_origin()
    args = ExternalTriggeredExecutionArgs(
        repository_origin=origin,
        instance_ref=instance.get_ref(),
        trigger_name=trigger_name,
    )
    result = execute_unary_api_cli_command(
        origin.executable_path, "trigger_execution_params", args
    )
    return check.inst(
        result, (ExternalExecutionParamsData, ExternalExecutionParamsErrorData)
    )
def sync_get_external_trigger_execution_params_ephemeral_grpc(
    instance, repository_handle, trigger_name
):
    """Spin up a short-lived gRPC server for the repository and query it."""
    # Imported lazily to avoid a circular import at module load time.
    from dagster.grpc.client import ephemeral_grpc_api_client

    executable = repository_handle.get_origin().executable_path
    target = LoadableTargetOrigin(executable_path=executable)
    with ephemeral_grpc_api_client(target) as client:
        return sync_get_external_trigger_execution_params_grpc(
            client, instance, repository_handle, trigger_name
        )
def sync_get_external_trigger_execution_params_grpc(
    api_client, instance, repository_handle, trigger_name
):
    """Query an already-running gRPC server for trigger execution params."""
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(trigger_name, "trigger_name")

    request = ExternalTriggeredExecutionArgs(
        repository_origin=repository_handle.get_origin(),
        instance_ref=instance.get_ref(),
        trigger_name=trigger_name,
    )
    response = api_client.external_trigger_execution_params(
        external_triggered_execution_args=request
    )
    return check.inst(
        response, (ExternalExecutionParamsData, ExternalExecutionParamsErrorData)
    )
|
[
"[email protected]"
] | |
2e5db24847888b7364737d3edcf63f609a59d47b
|
65c001b5f572a6b0ca09dd9821016d628b745009
|
/frappe-bench/env/lib/python2.7/site-packages/cssutils/css/colors.py
|
0c4e4803b12d140e5337d66ce04c6406d01dfd2f
|
[
"MIT"
] |
permissive
|
ibrahmm22/library-management
|
666dffebdef1333db122c2a4a99286e7c174c518
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
refs/heads/master
| 2022-10-30T17:53:01.238240 | 2020-06-11T18:36:41 | 2020-06-11T18:36:41 | 271,620,992 | 0 | 1 |
MIT
| 2022-10-23T05:04:57 | 2020-06-11T18:36:21 |
CSS
|
UTF-8
|
Python
| false | false | 6,669 |
py
|
# -*- coding: utf-8 -*-
"""
Built from something like this:
print [
(
row[2].text_content().strip(),
eval(row[4].text_content().strip())
)
for row in lxml.html.parse('http://www.w3.org/TR/css3-color/')
.xpath("//*[@class='colortable']//tr[position()>1]")
]
by Simon Sapin
"""
# CSS3 color keywords -> (r, g, b, a). Scraped from the W3C css3-color
# tables (see module docstring). The original scrape listed the 16 basic
# keywords twice (identical values, silently overwritten by the dict
# literal); the duplicates have been removed -- behavior is unchanged.
COLORS = {
    # 'transparent' is the only keyword with alpha 0.
    'transparent': (0, 0, 0, 0.0),
    'aliceblue': (240, 248, 255, 1.0),
    'antiquewhite': (250, 235, 215, 1.0),
    'aqua': (0, 255, 255, 1.0),
    'aquamarine': (127, 255, 212, 1.0),
    'azure': (240, 255, 255, 1.0),
    'beige': (245, 245, 220, 1.0),
    'bisque': (255, 228, 196, 1.0),
    'black': (0, 0, 0, 1.0),
    'blanchedalmond': (255, 235, 205, 1.0),
    'blue': (0, 0, 255, 1.0),
    'blueviolet': (138, 43, 226, 1.0),
    'brown': (165, 42, 42, 1.0),
    'burlywood': (222, 184, 135, 1.0),
    'cadetblue': (95, 158, 160, 1.0),
    'chartreuse': (127, 255, 0, 1.0),
    'chocolate': (210, 105, 30, 1.0),
    'coral': (255, 127, 80, 1.0),
    'cornflowerblue': (100, 149, 237, 1.0),
    'cornsilk': (255, 248, 220, 1.0),
    'crimson': (220, 20, 60, 1.0),
    'cyan': (0, 255, 255, 1.0),
    'darkblue': (0, 0, 139, 1.0),
    'darkcyan': (0, 139, 139, 1.0),
    'darkgoldenrod': (184, 134, 11, 1.0),
    'darkgray': (169, 169, 169, 1.0),
    'darkgreen': (0, 100, 0, 1.0),
    'darkgrey': (169, 169, 169, 1.0),
    'darkkhaki': (189, 183, 107, 1.0),
    'darkmagenta': (139, 0, 139, 1.0),
    'darkolivegreen': (85, 107, 47, 1.0),
    'darkorange': (255, 140, 0, 1.0),
    'darkorchid': (153, 50, 204, 1.0),
    'darkred': (139, 0, 0, 1.0),
    'darksalmon': (233, 150, 122, 1.0),
    'darkseagreen': (143, 188, 143, 1.0),
    'darkslateblue': (72, 61, 139, 1.0),
    'darkslategray': (47, 79, 79, 1.0),
    'darkslategrey': (47, 79, 79, 1.0),
    'darkturquoise': (0, 206, 209, 1.0),
    'darkviolet': (148, 0, 211, 1.0),
    'deeppink': (255, 20, 147, 1.0),
    'deepskyblue': (0, 191, 255, 1.0),
    'dimgray': (105, 105, 105, 1.0),
    'dimgrey': (105, 105, 105, 1.0),
    'dodgerblue': (30, 144, 255, 1.0),
    'firebrick': (178, 34, 34, 1.0),
    'floralwhite': (255, 250, 240, 1.0),
    'forestgreen': (34, 139, 34, 1.0),
    'fuchsia': (255, 0, 255, 1.0),
    'gainsboro': (220, 220, 220, 1.0),
    'ghostwhite': (248, 248, 255, 1.0),
    'gold': (255, 215, 0, 1.0),
    'goldenrod': (218, 165, 32, 1.0),
    'gray': (128, 128, 128, 1.0),
    'green': (0, 128, 0, 1.0),
    'greenyellow': (173, 255, 47, 1.0),
    'grey': (128, 128, 128, 1.0),
    'honeydew': (240, 255, 240, 1.0),
    'hotpink': (255, 105, 180, 1.0),
    'indianred': (205, 92, 92, 1.0),
    'indigo': (75, 0, 130, 1.0),
    'ivory': (255, 255, 240, 1.0),
    'khaki': (240, 230, 140, 1.0),
    'lavender': (230, 230, 250, 1.0),
    'lavenderblush': (255, 240, 245, 1.0),
    'lawngreen': (124, 252, 0, 1.0),
    'lemonchiffon': (255, 250, 205, 1.0),
    'lightblue': (173, 216, 230, 1.0),
    'lightcoral': (240, 128, 128, 1.0),
    'lightcyan': (224, 255, 255, 1.0),
    'lightgoldenrodyellow': (250, 250, 210, 1.0),
    'lightgray': (211, 211, 211, 1.0),
    'lightgreen': (144, 238, 144, 1.0),
    'lightgrey': (211, 211, 211, 1.0),
    'lightpink': (255, 182, 193, 1.0),
    'lightsalmon': (255, 160, 122, 1.0),
    'lightseagreen': (32, 178, 170, 1.0),
    'lightskyblue': (135, 206, 250, 1.0),
    'lightslategray': (119, 136, 153, 1.0),
    'lightslategrey': (119, 136, 153, 1.0),
    'lightsteelblue': (176, 196, 222, 1.0),
    'lightyellow': (255, 255, 224, 1.0),
    'lime': (0, 255, 0, 1.0),
    'limegreen': (50, 205, 50, 1.0),
    'linen': (250, 240, 230, 1.0),
    'magenta': (255, 0, 255, 1.0),
    'maroon': (128, 0, 0, 1.0),
    'mediumaquamarine': (102, 205, 170, 1.0),
    'mediumblue': (0, 0, 205, 1.0),
    'mediumorchid': (186, 85, 211, 1.0),
    'mediumpurple': (147, 112, 219, 1.0),
    'mediumseagreen': (60, 179, 113, 1.0),
    'mediumslateblue': (123, 104, 238, 1.0),
    'mediumspringgreen': (0, 250, 154, 1.0),
    'mediumturquoise': (72, 209, 204, 1.0),
    'mediumvioletred': (199, 21, 133, 1.0),
    'midnightblue': (25, 25, 112, 1.0),
    'mintcream': (245, 255, 250, 1.0),
    'mistyrose': (255, 228, 225, 1.0),
    'moccasin': (255, 228, 181, 1.0),
    'navajowhite': (255, 222, 173, 1.0),
    'navy': (0, 0, 128, 1.0),
    'oldlace': (253, 245, 230, 1.0),
    'olive': (128, 128, 0, 1.0),
    'olivedrab': (107, 142, 35, 1.0),
    'orange': (255, 165, 0, 1.0),
    'orangered': (255, 69, 0, 1.0),
    'orchid': (218, 112, 214, 1.0),
    'palegoldenrod': (238, 232, 170, 1.0),
    'palegreen': (152, 251, 152, 1.0),
    'paleturquoise': (175, 238, 238, 1.0),
    'palevioletred': (219, 112, 147, 1.0),
    'papayawhip': (255, 239, 213, 1.0),
    'peachpuff': (255, 218, 185, 1.0),
    'peru': (205, 133, 63, 1.0),
    'pink': (255, 192, 203, 1.0),
    'plum': (221, 160, 221, 1.0),
    'powderblue': (176, 224, 230, 1.0),
    'purple': (128, 0, 128, 1.0),
    'red': (255, 0, 0, 1.0),
    'rosybrown': (188, 143, 143, 1.0),
    'royalblue': (65, 105, 225, 1.0),
    'saddlebrown': (139, 69, 19, 1.0),
    'salmon': (250, 128, 114, 1.0),
    'sandybrown': (244, 164, 96, 1.0),
    'seagreen': (46, 139, 87, 1.0),
    'seashell': (255, 245, 238, 1.0),
    'sienna': (160, 82, 45, 1.0),
    'silver': (192, 192, 192, 1.0),
    'skyblue': (135, 206, 235, 1.0),
    'slateblue': (106, 90, 205, 1.0),
    'slategray': (112, 128, 144, 1.0),
    'slategrey': (112, 128, 144, 1.0),
    'snow': (255, 250, 250, 1.0),
    'springgreen': (0, 255, 127, 1.0),
    'steelblue': (70, 130, 180, 1.0),
    'tan': (210, 180, 140, 1.0),
    'teal': (0, 128, 128, 1.0),
    'thistle': (216, 191, 216, 1.0),
    'tomato': (255, 99, 71, 1.0),
    'turquoise': (64, 224, 208, 1.0),
    'violet': (238, 130, 238, 1.0),
    'wheat': (245, 222, 179, 1.0),
    'white': (255, 255, 255, 1.0),
    'whitesmoke': (245, 245, 245, 1.0),
    'yellow': (255, 255, 0, 1.0),
    'yellowgreen': (154, 205, 50, 1.0),
}
|
[
"[email protected]"
] | |
b731f7bb0a905cd69ba11d5d934cc0ac33f22050
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/simple-cipher/7df478df5b6546c4b554e717f00f4c75.py
|
902cdef37788b91c86d3d3b606190688274c6913
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 1,338 |
py
|
'''cipher.py
created 6 Nov 2014
by @jestuber '''
import string
class Caesar(object):
    """Classic Caesar cipher: a Cipher locked to the default key ('d')."""

    def __init__(self):
        super(Caesar, self).__init__()

    def encode(self, plaintext):
        # Delegate to a default-keyed Cipher (shift of 3).
        return Cipher().encode(plaintext)

    def decode(self, encoded):
        return Cipher().decode(encoded)
class Cipher(object):
    """Vigenere-style substitution cipher over lowercase ASCII letters.

    Punctuation, digits and spaces are stripped and the input is lowercased
    before the key is applied letter by letter, cycling through the key.

    Fixes: the original used Python-2-only APIs (``string.lowercase`` and
    ``str.translate(None, ...)``) which raise on Python 3, and duplicated
    the encode/decode loop; both paths now share ``_shifted``.
    """

    # Characters removed from the input before en/decoding.
    _STRIP = string.punctuation + string.digits + ' '

    def __init__(self, key='d'):
        super(Cipher, self).__init__()
        self.key = key
        # Per-letter shift amounts, e.g. 'd' -> [3].
        self.shift = [string.ascii_lowercase.index(c) for c in key]

    def encode(self, plaintext):
        """Encrypt *plaintext*; returns lowercase ciphertext letters only."""
        return self._shifted(plaintext, 1)

    def decode(self, encoded):
        """Decrypt *encoded*; the inverse of :meth:`encode`."""
        return self._shifted(encoded, -1)

    def _clean(self, text):
        """Lowercase *text* and drop punctuation, digits and spaces."""
        return text.translate(str.maketrans('', '', self._STRIP)).lower()

    def _shifted(self, text, direction):
        """Apply the key's shifts (scaled by ``direction``) to cleaned text."""
        letters = string.ascii_lowercase
        out = []
        for i, ch in enumerate(self._clean(text)):
            step = self.shift[i % len(self.shift)] * direction
            # Modulo 26 handles wraparound in both directions.
            out.append(letters[(letters.index(ch) + step) % 26])
        return ''.join(out)
|
[
"[email protected]"
] | |
51ef9ebcaa98ebc7587f1a24b2cf0e33fca79a0f
|
127ed1ba90dcced8cce8366a5139973f1d21c372
|
/python/lang/security/audit/insecure-transport/urllib/insecure-urlopener-open-ftp.py
|
edb25f3b511977c953c437733a648bdd97fd483d
|
[] |
no_license
|
Silentsoul04/semgrep-rules-1
|
f0c53e04b4239555a688bca687340af4736d2514
|
81b81481c0a81e45d3ffba8d60dd98491a1b0446
|
refs/heads/master
| 2022-12-22T15:41:34.399652 | 2020-09-13T14:59:38 | 2020-09-13T14:59:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,388 |
py
|
from urllib.request import URLopener
def test1():
    """Literal ftp:// URL opened on a bound URLopener -- must be flagged."""
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open("ftp://example.com")
def test1_ok():
    """ftps:// (secure) variant of test1 -- must NOT be flagged."""
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open("ftps://example.com")
def test2():
    """Insecure URL reaches open() via a local variable."""
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    od.open(url)
def test2_ok():
    """Secure URL via a local variable -- must NOT be flagged."""
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    od.open(url)
def test3():
    """Insecure literal URL on a throwaway URLopener instance."""
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open("ftp://example.com")
def test3_ok():
    """Secure literal URL on a throwaway instance -- must NOT be flagged."""
    # ok: insecure-urlopener-open-ftp
    URLopener().open("ftps://example.com")
def test4():
    """Insecure URL via a variable, throwaway instance."""
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    URLopener().open(url)
def test4_ok():
    """Secure URL via a variable, throwaway instance -- must NOT be flagged."""
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    URLopener().open(url)
def test5(url = "ftp://example.com"):
    """Insecure URL flowing in through a default parameter value."""
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open(url)
def test5_ok(url = "ftps://example.com"):
    """Secure default parameter value -- must NOT be flagged."""
    # ok: insecure-urlopener-open-ftp
    URLopener().open(url)
def test6(url = "ftp://example.com"):
    """Insecure default parameter, bound instance."""
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open(url)
def test6_ok(url = "ftps://example.com"):
    """Secure default parameter, bound instance -- must NOT be flagged."""
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open(url)
|
[
"[email protected]"
] | |
d8df3e108eb2a60fcac671fff7ece2212a4fd8a5
|
f0e11aeb7b5bd96c828cf39728eb2fa523f320df
|
/snapflow/cli/commands/generate.py
|
10a6882c60f47d5c9c2a9a96f8435d9b031bb621
|
[
"BSD-3-Clause"
] |
permissive
|
sathya-reddy-m/snapflow
|
7bc1fa7de7fd93b81e5b0538ba73ca68e9e109db
|
9e9e73f0d5a3d6b92f528ef1e2840ad92582502e
|
refs/heads/master
| 2023-05-01T05:14:08.479073 | 2021-05-21T00:14:56 | 2021-05-21T00:14:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,840 |
py
|
from __future__ import annotations
import os
from snapflow.cli.commands.base import SnapflowCommandBase
import sys
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from types import ModuleType
from typing import List, Pattern
from cleo import Command
from snapflow.core.declarative.dataspace import DataspaceCfg
from snapflow.templates.generator import generate_template, insert_into_file
def strip_snapflow(s: str) -> str:
    """Return *s* with a leading ``snapflow_`` prefix removed, if present.

    >>> strip_snapflow("snapflow_core")
    'core'
    >>> strip_snapflow("core")
    'core'
    """
    prefix = "snapflow_"
    if s.startswith(prefix):
        # Slice by len(prefix) instead of the original magic index 9, which
        # would silently break if the prefix ever changed.
        return s[len(prefix):]
    return s
class GenerateCommand(SnapflowCommandBase, Command):
    """
    Generate new snapflow component

    new
        {type : Type of component to generate (module, dataspace, function, schema, or flow)}
        {name : name of the component }
        {--s|namespace : namespace of the component, defaults to current module namespace }
    """
    # NOTE: the docstring above is parsed by cleo to define the CLI command
    # name, arguments and options -- do not edit it as prose.

    def handle(self):
        """Dispatch to ``handle_<type>``; reject unknown component types."""
        # self.import_current_snapflow_module()
        type_ = self.argument("type")
        name = self.argument("name")
        namespace = self.option("namespace")
        try:
            getattr(self, f"handle_{type_}")(name, namespace)
        except AttributeError:
            # NOTE(review): an AttributeError raised *inside* a handler is
            # also caught here and misreported as an invalid type -- confirm.
            raise ValueError(
                f"Invalid type {type_}, must be one of (module, dataspace, flow, function, schema)"
            )

    def handle_module(self, name: str, namespace: str):
        """Scaffold a new module from the 'module' template."""
        namespace = namespace or name
        generate_template(
            "module", namespace=namespace, name=name,
        )
        # generate_template("tests", py_module_name=py_module_name, module_name=name)

    def handle_dataspace(self, name: str, namespace: str):
        """Scaffold a dataspace; its config file is moved to the repo root."""
        # NOTE(review): assigns ``name = namespace or name`` (namespace wins
        # over the positional name) -- the mirror image of handle_module;
        # confirm this inversion is intentional.
        name = namespace or name
        generate_template(
            "dataspace", name=name,
        )
        # Move single file back down to root (cookiecutter doesn't support)
        os.rename(f"{name}/snapflow.yml", "snapflow.yml")

    def handle_function(self, name: str, namespace: str):
        """Scaffold a function under functions/ and register it in __init__."""
        module = self.import_current_snapflow_module()
        # NOTE(review): the ``namespace`` argument is ignored here and
        # re-derived from the imported module -- confirm that is intended.
        namespace = getattr(module, "namespace", None)
        with self.chdir_relative("functions"):
            generate_template("function", function_name=name, namespace=namespace)
        self.insert_function_into_current_init_file(name)

    def handle_schema(self, name: str, namespace: str):
        """Scaffold a schema under schemas/ and register it in __init__."""
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        with self.chdir_relative("schemas"):
            generate_template("schema", schema_name=name, namespace=namespace)
        self.insert_schema_into_current_init_file(name)

    def handle_flow(self, name: str, namespace: str):
        """Scaffold a flow under flows/."""
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        # NOTE(review): unlike the other handlers, this chdir is not scoped by
        # a context manager, so the working-directory change persists.
        os.chdir(self.abs_path("flows"))
        generate_template("flow", flow_name=name, namespace=namespace)
|
[
"[email protected]"
] | |
30a67ecaa65f58462ea307f9e7814f41c0df1c1a
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/cv2/StereoMatcher.py
|
7ab88dbc440f1ef092cd9bd0c28536beb666920f
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,845 |
py
|
# encoding: utf-8
# module cv2.cv2
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2  # C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.aruco as aruco # <module 'cv2.aruco'>
import cv2.bgsegm as bgsegm # <module 'cv2.bgsegm'>
import cv2.bioinspired as bioinspired # <module 'cv2.bioinspired'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.datasets as datasets # <module 'cv2.datasets'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.face as face # <module 'cv2.face'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.ft as ft # <module 'cv2.ft'>
import cv2.hfs as hfs # <module 'cv2.hfs'>
import cv2.img_hash as img_hash # <module 'cv2.img_hash'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.kinfu as kinfu # <module 'cv2.kinfu'>
import cv2.line_descriptor as line_descriptor # <module 'cv2.line_descriptor'>
import cv2.linemod as linemod # <module 'cv2.linemod'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.motempl as motempl # <module 'cv2.motempl'>
import cv2.multicalib as multicalib # <module 'cv2.multicalib'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.omnidir as omnidir # <module 'cv2.omnidir'>
import cv2.optflow as optflow # <module 'cv2.optflow'>
import cv2.plot as plot # <module 'cv2.plot'>
import cv2.ppf_match_3d as ppf_match_3d # <module 'cv2.ppf_match_3d'>
import cv2.quality as quality # <module 'cv2.quality'>
import cv2.reg as reg # <module 'cv2.reg'>
import cv2.rgbd as rgbd # <module 'cv2.rgbd'>
import cv2.saliency as saliency # <module 'cv2.saliency'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.structured_light as structured_light # <module 'cv2.structured_light'>
import cv2.text as text # <module 'cv2.text'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2.videostab as videostab # <module 'cv2.videostab'>
import cv2.xfeatures2d as xfeatures2d # <module 'cv2.xfeatures2d'>
import cv2.ximgproc as ximgproc # <module 'cv2.ximgproc'>
import cv2.xphoto as xphoto # <module 'cv2.xphoto'>
import cv2 as __cv2
class StereoMatcher(__cv2.Algorithm):
# no doc
def compute(self, left, right, disparity=None): # real signature unknown; restored from __doc__
"""
compute(left, right[, disparity]) -> disparity
. @brief Computes disparity map for the specified stereo pair
.
. @param left Left 8-bit single-channel image.
. @param right Right image of the same size and the same type as the left one.
. @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
. like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value
. has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
"""
pass
def getBlockSize(self): # real signature unknown; restored from __doc__
"""
getBlockSize() -> retval
.
"""
pass
def getDisp12MaxDiff(self): # real signature unknown; restored from __doc__
"""
getDisp12MaxDiff() -> retval
.
"""
pass
def getMinDisparity(self): # real signature unknown; restored from __doc__
"""
getMinDisparity() -> retval
.
"""
pass
def getNumDisparities(self): # real signature unknown; restored from __doc__
"""
getNumDisparities() -> retval
.
"""
pass
def getSpeckleRange(self): # real signature unknown; restored from __doc__
"""
getSpeckleRange() -> retval
.
"""
pass
def getSpeckleWindowSize(self): # real signature unknown; restored from __doc__
"""
getSpeckleWindowSize() -> retval
.
"""
pass
def setBlockSize(self, blockSize): # real signature unknown; restored from __doc__
"""
setBlockSize(blockSize) -> None
.
"""
pass
def setDisp12MaxDiff(self, disp12MaxDiff): # real signature unknown; restored from __doc__
"""
setDisp12MaxDiff(disp12MaxDiff) -> None
.
"""
pass
def setMinDisparity(self, minDisparity): # real signature unknown; restored from __doc__
"""
setMinDisparity(minDisparity) -> None
.
"""
pass
def setNumDisparities(self, numDisparities): # real signature unknown; restored from __doc__
"""
setNumDisparities(numDisparities) -> None
.
"""
pass
def setSpeckleRange(self, speckleRange): # real signature unknown; restored from __doc__
"""
setSpeckleRange(speckleRange) -> None
.
"""
pass
def setSpeckleWindowSize(self, speckleWindowSize): # real signature unknown; restored from __doc__
"""
setSpeckleWindowSize(speckleWindowSize) -> None
.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
|
[
"[email protected]"
] | |
60c9db8974c4fcdaf3f0fc22cf0b0a1ad6083ca1
|
5095a2cbc3fea5b63b6f3cabf4ae1bd930cdb479
|
/영동/16_숨바꼭질.py
|
68332c98c92b749a3fb174e52d26d5d881b07e15
|
[] |
no_license
|
syeeuns/week03
|
a198150d94caf772d6421b4adf6d8e28793853db
|
cf40b994fa285800854bac07b7ef86ad5dbdf35a
|
refs/heads/master
| 2023-02-06T13:13:11.012967 | 2020-12-31T04:14:10 | 2020-12-31T04:14:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from collections import deque
# Shortest-path puzzle: starting at position N, reach K using moves
# v-1, v+1 and v*2, each costing one step. BFS over 0..10**5 finds the
# minimum number of moves; parent links reconstruct one optimal path.
N,K = map(int, input().split())
Max=10**5+1
queue=deque([N])
# D[v][0] = distance from N to v (-1 = not visited yet)
# D[v][1] = BFS parent of v (-1 = no parent, i.e. v == N or unvisited)
D=[[-1]*2 for _ in range(Max)]
D[N][0]=0
T=[K]
z=K
while queue:
    v = queue.popleft()
    for newv in [v-1,v+1,v*2]:
        if 0<= newv < Max and D[newv][0]==-1:
            queue.append(newv)
            D[newv][0]=D[v][0]+1
            D[newv][1]=v
# Walk parent links back from K until a node with no parent (N) is reached.
while D[z][1]!=-1:
    T.append(D[z][1])
    z=D[z][1]
print(D[K][0])
print(*reversed(T))  # path printed in order N -> K
|
[
"[email protected]"
] | |
cde76863a99e655e46b43112532dd7da3bcc13d4
|
1bde0c807f17fc431b04b4b9cb338ee3acd34b7d
|
/.history/predict_20210713124241.py
|
7c01488a87ec48a2cd94cb2965aba58fb29d0d56
|
[] |
no_license
|
Harrysibbenga/Pytorch-NLP
|
cf9d7e6376d5e19929e6703c3342c81c1a128be1
|
6f22f6ac5f2bf37f27ed2d6285f3a154eda4b566
|
refs/heads/main
| 2023-06-19T22:43:35.513874 | 2021-07-14T19:45:15 | 2021-07-14T19:45:15 | 385,595,855 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 822 |
py
|
from model import *
from data import *
import sys
# Load the serialized character-RNN classifier from disk.
rnn = torch.load('char-rnn-classification.pt')
# Just return an output given a line
def evaluate(line_tensor):
    # Feed the line one character-tensor at a time, threading the hidden
    # state through; only the final output is returned.
    hidden = rnn.initHidden()
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    return output
def predict(line, n_predictions=3):
    """Print and return the top `n_predictions` [score, category] pairs for `line`."""
    output = evaluate(Variable(lineToTensor(line)))
    # Get top N categories
    topv, topi = output.data.topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i]
        category_index = topi[0][i]
        print('(%.2f) %s' % (value, all_categories[category_index]))
        predictions.append([value, all_categories[category_index]])
    return predictions
if __name__ == '__main__':
    # Usage: python predict.py <line-to-classify>
    predict(sys.argv[1])
|
[
"[email protected]"
] | |
e2a883be7a61493ac52b48a563af686087b2640a
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/19100101/sundyyang/d11_training1.py
|
1583902d982bb0769ab8ae4731fc7504566a7143
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null |
UTF-8
|
Python
| false | false | 875 |
py
|
# day 11
import requests
import getpass
import yagmail
from pyquery import PyQuery
from mymodule import stats_word
# Fetch the WeChat public-account article and extract its body text
response = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA')
document = PyQuery(response.text)
content = document('#js_content').text()
# Count Chinese words with the stats_word helper; keep the 100 most common
day11 = stats_word.stats_text_cn(content)
content = day11.most_common(100)
# NOTE(review): `content` (top 100) is not used below — the e-mail body is
# str(day11), the full counter. Verify which one was intended.
day11_1 = str(day11)
# print(day11_1)
# Collect mail settings interactively
user = input('请输入你的邮箱:') # sender mailbox account
password = getpass.getpass('请输入发件人邮箱密码(可复制粘贴):') # SMTP authorization code of the sender mailbox
recipient = input('请输入收件人邮箱:')
smtp = "smtp.163.com" # SMTP server address
# print(user,password,recipient) # debug check
# Send the word statistics by e-mail
yag = yagmail.SMTP(user,password,smtp)
yag.send(recipient,'19100101 sundyyang',day11_1)
|
[
"[email protected]"
] | |
2b7b1e3cfa9dbc03cc1d297534895c8a4362ab7a
|
3fa27b3ad1c1ca90f2bcf311d89fe8c2ca241cb4
|
/Location/models.py
|
86164283ca0e8eeb3356a4bbbb87c92cee61e3a0
|
[] |
no_license
|
emperorDuke/django-backend-for-ecommerce
|
717e15d7be899abcd5a4b7a7d2203c612f001aeb
|
83c1ca4d016d876a5c8711ac5cdc448d5a4a533d
|
refs/heads/master
| 2023-02-10T08:57:17.852721 | 2021-01-02T15:49:07 | 2021-01-02T15:49:07 | 271,039,012 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 869 |
py
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
# Create your models here
class Location(models.Model):
    """A postal address with a contact phone number.

    Uniqueness is enforced on (address, city, state) via Meta.unique_together;
    zip_code is the only optional text field.
    """
    address = models.CharField(_('address'), max_length=100, blank=False)
    city = models.CharField(_('city'), max_length=50, blank=False)
    country = models.CharField(_('country'), max_length=50, blank=False)
    zip_code = models.CharField(_('zip code'), max_length=50, blank=True)
    state = models.CharField(_('state'), max_length=50, blank=False)
    phone_number = PhoneNumberField(_('Phone number'), blank=False)
    # Updated on every save, not only on creation (auto_now, not auto_now_add).
    added_at = models.DateField(auto_now=True)
    class Meta:
        unique_together = ('address', 'city', 'state')
        db_table = 'location'
    def __str__(self):
        # Human-readable identity, e.g. "Lagos, Lagos State, Nigeria".
        return '%s, %s, %s' % (self.city, self.state, self.country)
|
[
"[email protected]"
] | |
093ee2463b1a86d455d7fd106f214c73722c2ee1
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc172_a.py
|
b5c6641365ed028613ce1898499a4b6fd7f432d3
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 751 |
py
|
# https://atcoder.jp/contests/abc172/tasks/abc172_a
# --- contest-template boilerplate kept commented out below ---
# import sys
# def input(): return sys.stdin.readline().rstrip()
# input = sys.stdin.readline
# input = sys.stdin.buffer.readline
# from numba import njit
# from functools import lru_cache
# sys.setrecursionlimit(10 ** 7)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# @njit(cache=True)
# def main():
#     # @lru_cache(None)
#     # def dfs():
#     #     return
#     A, B = map(int, input().split())
#     print(A*B)
#     return
# main()
# Read one integer a and print a + a^2 + a^3.
a = int(input())
ans = a + a*a + a*a*a
print(ans)
# --- unused input-reading snippets from the template ---
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
[
"[email protected]"
] | |
e8bb363682e5f8e63ac0e56a986076f1e43f91b6
|
7722c0c2d616af965124120c76bed472900d48bf
|
/build/pi_io/cmake/pi_io-genmsg-context.py
|
093469fa89dd16ff069e8425074390da5c5219da
|
[] |
no_license
|
EdisonAltamirano/Turtlebot_Mapping
|
f1cfb190b5a239d0e0bb4932e766cce1ec6c42ba
|
cfb98058745e5fbf84b2388254dbad2d045362ef
|
refs/heads/master
| 2022-11-13T11:48:27.915328 | 2020-07-06T00:02:45 | 2020-07-06T00:02:45 | 276,209,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 602 |
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
# Build-time context consumed by genmsg's CMake machinery for the pi_io
# package; all paths are absolute values baked in at configure time.
messages_str = "/home/edison/turtle_ws1/src/pi_io/msg/gpio_input.msg"
services_str = "/home/edison/turtle_ws1/src/pi_io/srv/gpio_output.srv"
pkg_name = "pi_io"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "pi_io;/home/edison/turtle_ws1/src/pi_io/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'  # evaluates to False: no static sources
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"[email protected]"
] | |
3a332e60dcd0d31aa40dd5f4b190704f255a6135
|
9d484077026b7fcf26188d77281f573eaec1f1d3
|
/scripts/adhoc/dendrogram_cuts.py
|
fef1177f5d92df2b34a79f14c35a9204d2660122
|
[] |
no_license
|
gaberosser/qmul-bioinf
|
603d0fe1ed07d7233f752e9d8fe7b02c7cf505fe
|
3cb6fa0e763ddc0a375fcd99a55eab5f9df26fe3
|
refs/heads/master
| 2022-02-22T06:40:29.539333 | 2022-02-12T00:44:04 | 2022-02-12T00:44:04 | 202,544,760 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 582 |
py
|
# Render the full dendrogram for linkage matrix `z` with no colour
# threshold (everything drawn in black), then save it.
plt.figure(figsize=(10, 4))
ax = plt.gca()
hierarchy.dendrogram(z, no_labels=True, color_threshold=0., above_threshold_color='k')
ax.axis('off')
plt.tight_layout()
plt.savefig('methyl_dendrogram_%dcuts.png' % 0, dpi=200)
# Re-draw the dendrogram cut into i clusters for i = 2..6; the dashed grey
# line marks the distance threshold that yields exactly i clusters.
for i in range(2, 7):
    c = clustering.dendrogram_threshold_by_nclust(z, i)
    plt.figure(figsize=(10, 4))
    ax = plt.gca()
    hierarchy.dendrogram(z, no_labels=True, color_threshold=c, above_threshold_color='k')
    ax.axhline(c, c='gray', ls='--')
    ax.axis('off')
    plt.tight_layout()
    plt.savefig('methyl_dendrogram_%dcuts.png' % i, dpi=200)
|
[
"[email protected]"
] | |
aa1b71d24e45defa75b428b775fb1f2ecb43ba1d
|
0c4bb0b0c91a6bfb8c6ea529b93a303661bcd625
|
/python/table_parse/table_data.py
|
c6dd3b8e0345ebfde4e1382caa04ecbcd3604d16
|
[] |
no_license
|
shahrukhqasim/TIES
|
28de4bd962624856e58f5293b5a91427132fa38d
|
279ce12ef5303f9428cf72d3f01a1922c7c5d584
|
refs/heads/master
| 2021-08-23T16:29:02.057736 | 2017-12-05T17:34:55 | 2017-12-05T17:34:55 | 106,605,384 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 844 |
py
|
class TableData:
    """Plain data holder bundling the per-table features used by the parser.

    Stores token embeddings and geometry plus the pairwise token relations
    (distance, adjacency, shared row/column/cell), the per-neighbor share
    flags, and convolutional features. No behavior beyond attribute storage.
    """

    def __init__(self, tokens_embeddings, tokens_rects, neighbor_distance_matrix, tokens_neighbor_matrix,
                 tokens_share_row_matrix, tokens_share_col_matrix, tokens_share_cell_matrix, neighbors_same_row,
                 neighbors_same_col, neighbors_same_cell, conv_features):
        # Map each stored attribute name to the corresponding argument and
        # assign in one pass.
        field_values = {
            'embeddings': tokens_embeddings,
            'rects': tokens_rects,
            'distances': neighbor_distance_matrix,
            'neighbor_graph': tokens_neighbor_matrix,
            'row_share': tokens_share_row_matrix,
            'col_share': tokens_share_col_matrix,
            'cell_share': tokens_share_cell_matrix,
            'neighbors_same_row': neighbors_same_row,
            'neighbors_same_col': neighbors_same_col,
            'neighbors_same_cell': neighbors_same_cell,
            'conv_features': conv_features,
        }
        for field_name, value in field_values.items():
            setattr(self, field_name, value)
|
[
"[email protected]"
] | |
a5ca0dba0718aa09e17d7542056fc9af17a7eb38
|
49c2492d91789b3c2def7d654a7396e8c6ce6d9f
|
/ROS/catkin_ws/build/dyros_simulator/dataspeed_can_tools/catkin_generated/pkg.installspace.context.pc.py
|
aaf615e0d8cdbcc7a35dbfeacc60e39121b30380
|
[] |
no_license
|
DavidHan008/lockdpwn
|
edd571165f9188e0ee93da7222c0155abb427927
|
5078a1b08916b84c5c3723fc61a1964d7fb9ae20
|
refs/heads/master
| 2021-01-23T14:10:53.209406 | 2017-09-02T18:02:50 | 2017-09-02T18:02:50 | 102,670,531 | 0 | 2 | null | 2017-09-07T00:11:33 | 2017-09-07T00:11:33 | null |
UTF-8
|
Python
| false | false | 453 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Values used by catkin at install time to emit the pkg-config (.pc) file
# for the dataspeed_can_tools package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldataspeed_can_tools".split(';') if "-ldataspeed_can_tools" != "" else []
PROJECT_NAME = "dataspeed_can_tools"
PROJECT_SPACE_DIR = "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/install"
PROJECT_VERSION = "1.0.4"
|
[
"[email protected]"
] | |
d5fc2e9c95367713fde53a9b10a7e522573cc1da
|
4fe1dc7170d2d44e2c9988c71b08f66d469ee4b8
|
/Appendices/E/ejE5.py
|
77ce7a4cada9e676303b27e369f41adfd4fb3073
|
[] |
no_license
|
ftorresi/PythonLearning
|
53c0689a6f3e7e219a6314a673a318b25cda82d1
|
f2aeb5f81d9090a5a5aa69a8d1203688e9f01adf
|
refs/heads/master
| 2023-01-12T00:40:05.806774 | 2020-11-13T14:33:08 | 2020-11-13T14:33:08 | 267,460,517 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,930 |
py
|
"""Now we solve the ODE problem u - 10u' = 0 u(0)= 0.2 in [0,20] using HEUN's method"""
import numpy as np
import matplotlib.pyplot as plt
#Exact solution
def exact_u(t):
    """Analytic solution u(t) = 0.2*exp(0.1*t) of u' = 0.1*u, u(0) = 0.2."""
    growth = np.exp(0.1 * t)
    return 0.2 * growth
#u'=f(u,t) as a class
class f:
    """Right-hand side f(u, t) = 0.1*u of the ODE, packaged as a callable."""

    def __init__(self):
        # Stateless: nothing to initialise.
        pass

    def __call__(self, u, t):
        # The rate does not depend on t; the parameter is kept only to
        # satisfy the standard f(u, t) solver interface.
        rate_constant = 0.1
        return rate_constant * u
#Forward Euler Method as a class
class Heun:
    """Heun's (explicit trapezoidal) method for u' = f(u, t), u(0) = U0 on [0, T].

    The interval is divided into n uniform steps of size dt = T/n.
    Call solve() to obtain (u, t): the numerical solution and time grid.
    """

    def __init__(self, f, U0, T, n):
        if not callable(f):
            raise TypeError('f is %s, not a function' % type(f))
        self.f = f
        self.U0 = U0
        self.T = T
        self.n = n
        self.dt = T / float(n)
        # Solution array and uniform time grid; filled in by solve().
        self.u = np.zeros(n + 1)
        self.t = np.linspace(0, T, n + 1)

    def solve(self):
        """Compute the solution for 0 <= t <= T and return (u, t)."""
        self.u[0] = float(self.U0)
        for step in range(self.n):
            self.k = step  # advance() reads the current index from self.k
            self.u[step + 1] = self.advance()
        return self.u, self.t

    def advance(self):
        """Advance the solution one time step (predictor-corrector form)."""
        k = self.k
        u_k = self.u[k]
        slope_start = self.f(u_k, self.t[k])
        predictor = u_k + self.dt * slope_start  # forward-Euler predictor
        slope_end = self.f(predictor, self.t[k + 1])
        return u_k + 0.5 * self.dt * (slope_start + slope_end)
#Parameters of the problem: interval [0, T], initial value u(0) = U0
T=20
U0=0.2
#Plot exact solution on a fine grid for reference
tgrid=np.linspace(0,T,2001)
uexact=exact_u(tgrid)
plt.plot(tgrid, uexact, "r-", label="Exact Solution")
#Numerical calculations and plots: progressively finer grids show
#convergence of Heun's method toward the exact curve.
nlist=[4,40,400]
f_init=f()
for n in nlist:
    solver=Heun(f=f_init, U0=U0, T=T, n=n)
    sol, t = solver.solve()
    plt.plot(t, sol, "--", label="dt=%g"%(t[1]-t[0]))
plt.legend()
plt.title("u-10u'=0, u(0)=0.2 with Heun's method")
plt.xlabel("t")
plt.ylabel("u(t)")
plt.savefig("ejE5.png")
#Save to file (only the last, finest solution: n = 400)
with open("ejE5.out","w") as outfile:
    outfile.write("Numerical Solution to u-10u'=0, u(0)=0.2 with Heun's method\n")
    outfile.write(" t u(t)\n")
    for i in range(len(t)):
        outfile.write("%5.2f %7.4f\n"%(t[i], sol[i]))
|
[
"[email protected]"
] | |
aeda73e4de7393ca198519384998e625a5a63d26
|
6f33381dcb19a042d916b4a452f9cb7438729798
|
/jabba/graphs/legend.py
|
6767d1bc8f8140580a6220c89e4327bd31cd22ab
|
[
"MIT"
] |
permissive
|
puppetlabs/jabba
|
8308adf5be9ba25efb414f384bf3568854be55e2
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
refs/heads/master
| 2023-06-13T09:17:49.274408 | 2017-06-30T11:02:27 | 2017-06-30T11:02:27 | 185,443,592 | 0 | 1 | null | 2019-05-07T16:54:03 | 2019-05-07T16:54:02 | null |
UTF-8
|
Python
| false | false | 939 |
py
|
import graphviz as gv
class Legend(object):
    """
    Builds a GraphViz cluster that renders a legend: one labelled sample
    edge per registered item, drawn with that item's edge settings.
    """

    def __init__(self):
        # name -> GraphViz edge attributes for that legend entry
        self.items = {}

    def add_item(self, name, settings):
        """Register a legend entry; `settings` are GraphViz edge kwargs."""
        self.items[name] = settings

    def render(self):
        """Return the legend wrapped in an invisible outer cluster."""
        inner = gv.Digraph('cluster_legend')
        inner.body.extend(['label="Legend"'])
        for entry_name, edge_settings in self.items.items():
            source = "{}-1".format(entry_name)
            target = "{}-2".format(entry_name)
            inner.node(source, label="")
            inner.node(target, label="")
            # leading space keeps the label from overlapping the edge
            inner.edge(source, target, label=" {}".format(entry_name), **edge_settings)
        wrapper = gv.Digraph('cluster_legend_wrapper')
        wrapper.subgraph(inner)
        wrapper.body.extend(['style=invis'])
        return wrapper
|
[
"[email protected]"
] | |
90254a2e8ba7c81e196fd637cbd4598c1fdaa717
|
3e1b5d7cb529be1529ae45fa062a423f8328d6d2
|
/Edgar-new-codes/Getting-10-K- and-10-Q doc links.py
|
54aab244b45e5b116a35eee0099a5ad3de1aba53
|
[] |
no_license
|
abhigupta4/Finrsch
|
260687b3b53d3c94a03dc2b9e640952718033486
|
ca0b7f1631fbbe109b81403b9ffc36c67c759d23
|
refs/heads/master
| 2021-01-19T10:54:19.705213 | 2017-07-06T12:22:37 | 2017-07-06T12:22:37 | 87,913,197 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,806 |
py
|
import requests
from bs4 import BeautifulSoup
import urlparse
# Crawl the SEC EDGAR daily index for 2017 Q2, find 10-K / 10-Q filings,
# and print links to each filing's index page and primary document.
# NOTE: Python 2 code (print statements, urlparse module).
def get_file(cur,val):
    # Print the first 'Archives' link found on the filing index page `cur`.
    base = 'https://www.sec.gov'
    r = requests.get(cur)
    document = BeautifulSoup(r.content,"lxml")
    links = document.find_all('a')
    for link in links:
        if 'Archives' in link.get("href"):
            print 'Document link'
            print base+link.get("href")
            break
def take_second_link(cur,cik,val):
    # Locate the filing's index page (href contains `cik`, is an index page,
    # not a headers file) and report whether it is a 10-K (val truthy) or 10-Q.
    begin = 'https://www.sec.gov'
    r = requests.get(cur)
    document = BeautifulSoup(r.content,"lxml")
    links = document.find_all('a')
    for link in links:
        temp = link.get("href")
        if 'index' in temp and 'headers' not in temp and cik in temp:
            print
            print 'Company link'
            if val:
                print "Type 10-K"
            else:
                print "Type 10-Q"
            print begin+temp
            get_file(begin + temp,val)
def find_link1(entire,val):
    # Build the per-company Archives URL from the idx-row tokens: keep the
    # digits of the accession part and the CIK directory, then recurse.
    begin = 'https://www.sec.gov/Archives/edgar/data/'
    for part in entire:
        if 'data' in part:
            temp = part.split('/')
            last = ''
            for ele in temp[-1]:
                if ele.isdigit():
                    last += ele
            new = begin + temp[-2] + '/' + last
            take_second_link(new,temp[-2],val)
def inside_index(link1):
    # Scan one daily company .idx file line by line for 10-Q / 10-K rows.
    # A form-type token surrounded by empty split fields marks a match.
    r = requests.get(main_link+link1)
    document = BeautifulSoup(r.content,"lxml")
    soup = document.get_text()
    lines = soup.split("\n")
    flag = 1
    for line in lines:
        temp = line.split(" ")
        for i in xrange(len(temp)):
            if temp[i] == '10-Q' and temp[i-1] == '' and temp[i+1] == '':
                find_link1(temp,0)
                break
            if temp[i] == '10-K' and temp[i-1] == '' and temp[i+1] == '':
                find_link1(temp,1)
                break
# Entry point: iterate every company .idx file listed for the quarter.
main_link = 'https://www.sec.gov/Archives/edgar/daily-index/2017/QTR2/'
r = requests.get(main_link)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
    if 'company' in link.get("href") and '.idx' in link.get("href"):
        inside_index(link.get("href"))
    # break
|
[
"[email protected]"
] | |
ec47bc066bc69f6cf12e1ef76fe29f8be677394c
|
5667cc877342204b7d54b6c3cc5a9f4854f08829
|
/.history/apppersona/views_20201101174230.py
|
3ca271ad58977d9585b9c4096dc875f160abb1d5
|
[] |
no_license
|
Nyckhos/TestCommit
|
d62e3f6fefb04ab5647475cc7ead0d72cbd89efa
|
9aa8e2e35280b7862960cc8a864e9c02ac7f4796
|
refs/heads/main
| 2023-01-05T05:57:59.223641 | 2020-11-02T02:08:18 | 2020-11-02T02:08:18 | 309,237,224 | 2 | 0 | null | 2020-11-02T02:30:43 | 2020-11-02T02:30:43 | null |
UTF-8
|
Python
| false | false | 3,787 |
py
|
from django.http import request
from django.shortcuts import redirect, render
from django.http import HttpResponse
from .models import *
from .forms import *
from django.contrib.auth.models import User
from django.contrib.auth import *
from django.urls import reverse
from django.contrib.auth import login
from django.contrib.auth.decorators import *
from django.contrib.admin.views.decorators import *
from django.shortcuts import render, redirect
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.db.models.query_utils import Q
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
# Create your views here.
#@login_required
#def index(request):
# return render(request,'appersona/index.html')
def lista_personas(request):
    """Render the list of every registered user."""
    lista = User.objects.all() # all users
    return render(request, 'apppersona/lista_personas.html', {'lista': lista})
def lista_tarjetas(request):
    """Render every Junaeb card."""
    tarjetas = TarjetaJunaeb.objects.all()
    return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def tarjetas_con_plata(request):
    """Render only the cards with a positive available balance."""
    tarjetas = TarjetaJunaeb.objects.filter(montoDisponible__gte=1)
    return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def index(request):
    """Render the landing page."""
    return render(request, 'apppersona/index.html')
def contacto(request):
    """Render the contact page."""
    return render(request, 'apppersona/contacto.html')
def nosotros(request):
    """Render the about-us page."""
    return render(request, 'apppersona/nosotros.html')
def register(request):
    """Create a User plus its linked profile; log the new user in on success.

    On GET (or any non-POST) both forms are rendered empty. On an invalid
    POST the bound forms fall through to the render call, so field errors
    are shown to the user.
    """
    if request.method == "POST":
        form = ExtendedUserCreationForm(request.POST)
        profile_form = FormularioPersona(request.POST)
        if form.is_valid() and profile_form.is_valid():
            user = form.save()
            # Delay saving the profile until its FK to the new user is set.
            profile = profile_form.save(commit=False)
            profile.user = user
            profile.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # Re-authenticate with the raw credentials so login() receives
            # a backend-annotated user object.
            user = authenticate(username=username, password=password)
            login(request, user)
            return redirect('index')
    else:
        form = ExtendedUserCreationForm()
        profile_form = FormularioPersona()
    context = {'form': form, 'profile_form': profile_form}
    return render(request, "apppersona/registro.html", context)
def password_reset_request(request):
    """E-mail a password-reset link to every user matching the posted address.

    NOTE(review): domain and protocol in the e-mail context are hard-coded
    for local development ('127.0.0.1:8000', 'http') — confirm before deploy.
    NOTE(review): on an invalid or unmatched POST the bound form is replaced
    by a fresh one below, so validation errors are not displayed — verify intent.
    """
    if request.method == "POST":
        password_reset_form = PasswordResetForm(request.POST)
        if password_reset_form.is_valid():
            data = password_reset_form.cleaned_data['email']
            associated_users = User.objects.filter(Q(email=data))
            if associated_users.exists():
                for user in associated_users:
                    subject = "Password Reset Requested"
                    email_template_name = "main/password/password_reset_email.txt"
                    # Context consumed by the reset e-mail template.
                    c = {
                    "email":user.email,
                    'domain':'127.0.0.1:8000',
                    'site_name': 'Website',
                    "uid": urlsafe_base64_encode(force_bytes(user.pk)),
                    "user": user,
                    'token': default_token_generator.make_token(user),
                    'protocol': 'http',
                    }
                    email = render_to_string(email_template_name, c)
                    try:
                        send_mail(subject, email, '[email protected]' , [user.email], fail_silently=False)
                    except BadHeaderError:
                        return HttpResponse('Invalid header found.')
                    # Redirect after the first successful send.
                    return redirect ("/password_reset/done/")
    password_reset_form = PasswordResetForm()
    return render(request=request, template_name="main/password/password_reset.html", context={"password_reset_form":password_reset_form})
|
[
"[email protected]"
] | |
827e3bcf2fca26a7b7abc2fc74da531da077f856
|
f6078890ba792d5734d289d7a0b1d429d945a03a
|
/extra/oconnorcollin_24162_1340359_Collin_O'Connor_1607318_ExtraCredit_week7.py
|
768973e38dd5ce426badbd6677369e038d6aa08c
|
[] |
no_license
|
huazhige/EART119_Lab
|
1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0
|
47931d6f6a2c7bc053cd15cef662eb2f2027712c
|
refs/heads/master
| 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,750 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 13 08:26:50 2019
@author: collin O'Connor
"""
import numpy as np
import integrate_utils as int_utils
#==============================================================================
#Question 1
#==============================================================================
"""
Numerical integration of difinite integrals:
ex: f(t) = 3t**2 * exp(t^3)
F(t) = exp(t^3)
between: a, b with F'(t) =f(t)
"""
#==============================================================================
#fn defs
#==============================================================================
def fct_f(t):
    """Integrand f(t) = 3*t**2 * exp(t**3); the derivative of fct_F."""
    return 3*t**2 * np.exp(t**3)
def fct_F(t):
    """Antiderivative F(t) = exp(t**3) of fct_f, used for the exact integral."""
    return np.exp(t**3)
###########integration fn##########
def trapezoidal(fct_x, x0, xn, N):
    """
    Composite trapezoidal rule (eq. 3.17, p. 60 in Linge & Langtangen).
    params:
        fct_x  = function to integrate
        x0, xn = integration bounds
        N      = number of trapezoids
    return:
        approximate value of the definite integral of fct_x
        between x0 and xn
    """
    dx = float(xn - x0) / N
    # Endpoints carry weight 1/2, interior points weight 1.
    endpoint_part = 0.5 * (fct_x(x0) + fct_x(xn))
    interior_part = sum(fct_x(x0 + i * dx) for i in range(1, N))
    return dx * (endpoint_part + interior_part)
def midpoint(fct_x, x0, xn, N):
    """
    Composite midpoint method, eq. 3.21 page 66 in Linge & Langtangen
    :param fct_x: - function whose integral is in question (must accept a
                    numpy array, i.e. be vectorized)
    :param x0: - integration bounds
    :param xn: - integration bounds
    :param N: - number of sub-intervals; larger N gives higher accuracy
    :return: - approximation of the integral of fct_x between x0 and xn
    """
    dx = (xn - x0) / float(N)
    # Midpoints of the N sub-intervals: x0 + (i + 1/2)*dx for i = 0..N-1.
    midpoints = x0 + dx * (np.arange(N) + 0.5)
    return dx * np.sum(fct_x(midpoints))
#==============================================================================
#parameters: integrate fct_f over [0, 1] with N sub-intervals
#==============================================================================
xmin, xmax = 0, 1
N = 1000
#==============================================================================
#num integration and comparison (Python 2 print statements)
#==============================================================================
#exact solution: F(1) - F(0)
f_IntExact = fct_F(xmax) - fct_F(xmin)
#Trapazoidal method numerical approximation
f_IntNum = trapezoidal(fct_f, xmin, xmax, N)
#Midpoint method numerical approximation
f_mid=midpoint(fct_f, xmin, xmax, N)
#compare exact and Numerical
print "Question 1:"
print 'exact integral: ', f_IntExact
print 'Trapazoidal Method Numerical approx.: ', f_IntNum
print 'Midpoint Method Numerical approx.: ', f_mid
print
#==============================================================================
#Question 2
#==============================================================================
"""Compute mean value of fns
and compare to the definite integral:
f(x)=sin(x)
g(x)=2x*exp(x**2)
"""
def fx(x):
    """Integrand f(x) = sin(x) for Question 2."""
    return np.sin(x)
def gx(x):
    """Integrand g(x) = 2*x*exp(x**2); its antiderivative is exp(x**2)."""
    return 2*x * np.exp(x**2)
def mean_val(integral_fx, xmax, xmin):
    """Mean value of a function over [xmin, xmax].

    params:
        integral_fx = value of the definite integral over [xmin, xmax]
        xmax, xmin  = integration bounds (xmax != xmin)
    return:
        integral_fx / (xmax - xmin)
    """
    # float() guards against Python 2 integer division when both bounds are
    # ints (e.g. mean_val(I, 2, 0) would otherwise truncate to 0*I).
    return integral_fx / float(xmax - xmin)
print "Question 2:"
print 'mean value of f(x): ', round(mean_val(trapezoidal(fx,0, np.pi, 1000), np.pi, 0), 3)
print 'integral of f(x): ', round(trapezoidal(fx,0, np.pi, 1000), 3)
print 'mean value of g(x): ', round(mean_val(trapezoidal(gx, 0, 1, 1000), 1, 0), 3)
print 'Integral of g(x): ', round(trapezoidal(gx, 0, 1, 1000), 3)
print
#==============================================================================
#Question 3
#==============================================================================
#================================================
# fct definition
#================================================
def fct2_xy( x, y):
    """Euclidean norm sqrt(x**2 + y**2); the part (a) integrand."""
    return (x**2 + y**2)**0.5
def fct_xy( x, y):
    """Integrand x*y**2 for part (b)."""
    return x*(y**2)
def fct_gxy( x, y):
    """
    Indicator of the rectangular domain [xmin, xmax] x [ymin, ymax].
    Reads the module-level bounds current at call time.
    return: 1 for points inside the rectangle, -1 for points outside
    """
    f_retVal = -1
    if x >= xmin and x <= xmax and y >= ymin and y <= ymax:
        f_retVal = 1
    return f_retVal
def fct_Fxy_exact(x, y):
    """Exact helper 0.5*x**2 + y**3/3 used to evaluate part (b) analytically."""
    return (0.5*(x**2))+ ((1./3)*(y**3))
def fct_exact(r, theta):
    """Exact helper theta*r**3/3 used to evaluate part (a) analytically."""
    return theta*((r**3)/3.)
#================================================
# parameters
#================================================
# Domain bounds read globally by fct_gxy; part (b) rectangle and part (a)
# polar ranges.
xmin, xmax = 0, 2
ymin, ymax = 0, 1.5
rmin, rmax = 0, 2
theta_min, theta_max = 0, 2*np.pi
#================================================
# compute integral (Python 2 print statements)
#================================================
#compute definite integral, comparing exact values against Monte Carlo
#estimates at increasing sample counts
print "Question 3:"
print ('exact solution part a: ', round(fct_exact(rmax, theta_max) - fct_exact(rmin, theta_min), 3))
print 'monte Carlo solution part a: '
for n in np.arange(100, 1200, 200):
    gInt = int_utils.monteCarlo(fct2_xy, fct_gxy, rmin, rmax, theta_min, theta_max, n)
    #in int_utils the monteCarlo result was supposed to be squared, but it
    #never was, so the square is applied here.
    gInt = gInt**2
    print 'no. random points', n, 'number integral', round(gInt, 3)
print
print('exac. sol part b: ', round(fct_Fxy_exact(xmax, ymax) - fct_Fxy_exact(xmin, ymin), 3))
print 'monte Carlo solution part b: '
for n in np.arange(100, 1200, 200):
    fInt=int_utils.monteCarlo(fct_xy, fct_gxy, xmin+1, xmax+1, ymin, ymax, n )
    #in int_utils the monteCarlo result was supposed to be squared, but it
    #never was, so the square is applied here.
    fInt = (fInt**2)
    print 'no. random points', n, 'number integral', round(fInt, 3)
|
[
"[email protected]"
] | |
69fe2635469cacf0543c8bdc6588c35e1ff15509
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/network/vrid6_interface_binding.py
|
514e8bd208b3024680ba6c2b0c5d4530d8b2a8a3
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953 | 2017-07-05T18:09:09 | 2017-07-05T18:09:09 | 95,933,896 | 2 | 5 | null | 2017-07-05T16:51:29 | 2017-07-01T01:03:20 |
HTML
|
UTF-8
|
Python
| false | false | 6,678 |
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vrid6_interface_binding(base_resource) :
    """ Binding class showing the interface that can be bound to vrid6.

    Represents one row of the NITRO ``vrid6_interface_binding`` resource:
    an interface bound to an IPv6 VMAC (VRID6) entry on the appliance.
    Serialization to and from the NITRO REST API is handled by the
    base_resource machinery; this class only declares the fields and the
    standard add/delete/get/count entry points.
    """
    def __init__(self) :
        # Backing fields for the properties below.  They are filled in
        # either by the caller (id/ifnum, before add/delete) or by the
        # payload formatter when a GET response is deserialized.
        self._ifnum = None
        self._vlan = None
        self._flags = None
        self._id = None
        # Populated by "count" queries; read back through __dict__ (see
        # count()/count_filtered()) because the leading underscores make
        # Python name-mangle direct attribute access inside the class.
        self.___count = 0
    @property
    def id(self) :
        r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value =  1<br/>Maximum value =  255.
        """
        try :
            return self._id
        except Exception as e:
            raise e

    @id.setter
    def id(self, id) :
        r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value =  1<br/>Maximum value =  255
        """
        try :
            self._id = id
        except Exception as e:
            raise e

    @property
    def ifnum(self) :
        r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
        """
        try :
            return self._ifnum
        except Exception as e:
            raise e

    @ifnum.setter
    def ifnum(self, ifnum) :
        r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
        """
        try :
            self._ifnum = ifnum
        except Exception as e:
            raise e

    @property
    def flags(self) :
        r"""Flags.  Read-only: reported by the appliance, no setter.
        """
        try :
            return self._flags
        except Exception as e:
            raise e

    @property
    def vlan(self) :
        r"""The VLAN in which this VRID resides.  Read-only: reported by the appliance, no setter.
        """
        try :
            return self._vlan
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        r""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(vrid6_interface_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Errorcode 444 presumably signals an expired session on the
                # appliance — drop the cached session so the next call
                # re-authenticates.  TODO(review): confirm against NITRO docs.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.vrid6_interface_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        r""" Returns the value of object identifier argument
        """
        try :
            if self.id is not None :
                return str(self.id)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        r""" Use this API to bind interface(s) to a vrid6 entry.

        ``resource`` may be a single vrid6_interface_binding or a list of
        them; lists are sent as one bulk update request.
        """
        try :
            if resource and type(resource) is not list :
                updateresource = vrid6_interface_binding()
                updateresource.id = resource.id
                updateresource.ifnum = resource.ifnum
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [vrid6_interface_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].id = resource[i].id
                        updateresources[i].ifnum = resource[i].ifnum
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        r""" Use this API to unbind interface(s) from a vrid6 entry.

        ``resource`` may be a single vrid6_interface_binding or a list of
        them; lists are sent as one bulk delete request.
        """
        try :
            if resource and type(resource) is not list :
                deleteresource = vrid6_interface_binding()
                deleteresource.id = resource.id
                deleteresource.ifnum = resource.ifnum
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [vrid6_interface_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].id = resource[i].id
                        deleteresources[i].ifnum = resource[i].ifnum
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, id="", option_="") :
        r""" Use this API to fetch vrid6_interface_binding resources.

        With no ``id``, fetches all bindings; otherwise fetches the
        bindings of the given VRID.
        """
        try :
            if not id :
                obj = vrid6_interface_binding()
                response = obj.get_resources(service, option_)
            else :
                obj = vrid6_interface_binding()
                obj.id = id
                response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, id, filter_) :
        r""" Use this API to fetch filtered set of vrid6_interface_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vrid6_interface_binding()
            obj.id = id
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, id) :
        r""" Use this API to count vrid6_interface_binding resources configured on NetScaler.
        """
        try :
            obj = vrid6_interface_binding()
            obj.id = id
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count comes back on the first element; access through
            # __dict__ to bypass name mangling of '___count'.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, id, filter_) :
        r""" Use this API to count the filtered set of vrid6_interface_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vrid6_interface_binding()
            obj.id = id
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            # Same __dict__ access as count(): see comment there.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class vrid6_interface_binding_response(base_response) :
    """ NITRO response wrapper for vrid6_interface_binding GET requests.

    The payload formatter fills ``vrid6_interface_binding`` with the
    deserialized rows and the error fields with the appliance's status.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate the payload list so the deserializer can fill it
        # in place.  (The original assigned an empty list first and then
        # immediately overwrote it — the redundant assignment is removed.)
        self.vrid6_interface_binding = [vrid6_interface_binding() for _ in range(length)]
|
[
"[email protected]"
] | |
4e6f7728e1ccc0ee08f9eab26f26539c32f245f1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02779/s326665097.py
|
af3b88d1cfb1a25941fa607ee97b983b715cf65f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 221 |
py
|
from collections import Counter
n = int(input())
a = list(map(int, input().split()))
c = Counter(a)
new = c.values()
for i in new:
if i != 1:
re = "NO"
break
else:
re = "YES"
print(re)
|
[
"[email protected]"
] | |
2bc1c55aa465d41767f5a4a17e88f2902fa650a2
|
115b5356242176b8873ae7e43cd313e41cbd0ee6
|
/webstuff/webscraper/tidytext.py
|
057e7c3a5a8d77e0574b55b38fb0fe5b7a3b444a
|
[] |
no_license
|
squeakus/bitsandbytes
|
b71ec737431bc46b7d93969a7b84bc4514fd365b
|
218687d84db42c13bfd9296c476e54cf3d0b43d2
|
refs/heads/master
| 2023-08-26T19:37:15.190367 | 2023-07-18T21:41:58 | 2023-07-18T21:42:14 | 80,018,346 | 2 | 4 | null | 2022-06-22T04:08:35 | 2017-01-25T13:46:28 |
C
|
UTF-8
|
Python
| false | false | 976 |
py
|
from BeautifulSoup import BeautifulSoup
import re
page = open('out2.txt','r')
for idx,line in enumerate(page):
parts = line.split(';')
for part in parts:
#print part, '\n'
if part.startswith('var point = new GLatLng'):
print "\n", part.lstrip('var point = new GLatLng')
m = re.search('table(.+?)table', line)
if m:
found = m.group(1)
found = '<table' + found +'table>'
found = found.replace('\\','')
soup = BeautifulSoup(found)
info = soup.findAll('tr',{'class':'wind_row'})
name = soup.findAll('a')
print name[0].text
for data in info:
direction = str(data.find('img'))
direction = direction.rstrip('.png" />')
direction = direction.lstrip('<img src="images/wind/')
print direction
n = re.search('Wind:(.+?)km', str(data))
if n:
speed = n.group(1)
print speed
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.