blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6ee5824a7f0a33926ee6524a24a32398e7a7a209
|
e82a5480b960abc154025168a27742149ae74de3
|
/Leetcode/Dynamic Programming/Medium/1043_partition_array_for_maximum_sum.py
|
ea7a058f3689100fc3140e54fb2a74d56b88cb62
|
[] |
no_license
|
harshsodi/DSA
|
8e700f0284f5f3c5559a7e385b82e0a6c96d3363
|
18f82f9b17a287abe3f318118691b62607e61ff9
|
refs/heads/master
| 2021-07-07T23:42:50.750471 | 2020-09-11T03:16:41 | 2020-09-11T03:16:41 | 186,679,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 918 |
py
|
# Runtime: 448 ms, faster than 41.60% of Python online submissions for Partition Array for Maximum Sum.
# Memory Usage: 11.8 MB, less than 100.00% of Python online submissions for Partition Array for Maximum Sum.
class Solution(object):
    def maxSumAfterPartitioning(self, A, K):
        """
        :type A: List[int]
        :type K: int
        :rtype: int

        dp[i] is the best total for A[:i+1] when the array is cut into
        chunks of length at most K and every element in a chunk is
        replaced by the chunk's maximum.
        """
        dp = [0] * len(A)
        dp[0] = A[0]
        for end in range(1, len(A)):
            window_max = A[end]
            # Try every chunk A[left:end+1] of size 1..K ending at `end`.
            for size in range(1, K + 1):
                left = end - size + 1
                if left < 0:
                    break
                window_max = max(window_max, A[left])
                prior = dp[left - 1] if left > 0 else 0
                dp[end] = max(dp[end], window_max * size + prior)
        return dp[-1]
|
[
"[email protected]"
] | |
bcbe4e83dec0fe91a1870110287f8df495d3f9c4
|
737c0920b33fddb3fc7b6ff7287f06faaf9958bb
|
/models/temp/common_spec_2.py
|
e47cda732a65a69386b22619f5cf0ec7033e294e
|
[] |
no_license
|
Willamjie/CCWH-ACB
|
aa51b412adccf0078bc2f575dd47e22cd2daa689
|
e15176c9d74c1b9232d72d79114f0bf6aa0d315e
|
refs/heads/main
| 2023-02-25T14:30:57.389888 | 2021-02-02T14:08:06 | 2021-02-02T14:08:06 | 335,209,023 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,308 |
py
|
# This file contains modules common to various models
from utils.utils import *
from models.DConv import DOConv2d
def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel k unless p is given explicitly."""
    if p is not None:
        return p
    # int kernel -> single pad; sequence kernel -> per-dimension pads
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]
def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise(-style) convolution: group count is gcd(c1, c2)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)
class Conv(nn.Module):
    """Standard conv block: DOConv2d -> BatchNorm2d -> LeakyReLU(0.1).

    act=True selects LeakyReLU(0.1); act=False makes the activation a no-op.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        pad = autopad(k, p)  # 'same' padding unless given explicitly
        self.conv = DOConv2d(c1, c2, k, s, pad, groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        if act:
            self.act = nn.LeakyReLU(0.1, inplace=True)
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Standard path: conv -> BN -> activation."""
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        """Post-fusion path: BN already folded into conv weights, skip self.bn."""
        return self.act(self.conv(x))
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, optional residual add."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # Residual add only when shapes allow it (same in/out channels).
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y
class BottleneckCSP(nn.Module):
    """CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        blocks = [Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)]
        self.m = nn.Sequential(*blocks)

    def forward(self, x):
        main_branch = self.cv3(self.m(self.cv1(x)))
        shortcut_branch = self.cv2(x)
        merged = torch.cat((main_branch, shortcut_branch), dim=1)
        return self.cv4(self.act(self.bn(merged)))
class SPP(nn.Module):
    """Spatial pyramid pooling layer as used in YOLOv3-SPP."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        # stride-1 max pools with 'same' padding, one per kernel size
        pools = [nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2) for size in k]
        self.m = nn.ModuleList(pools)

    def forward(self, x):
        y = self.cv1(x)
        pooled = [pool(y) for pool in self.m]
        return self.cv2(torch.cat([y] + pooled, 1))
class Focus(nn.Module):
    """Focus wh information into channel space: (b,c,w,h) -> (b,4c,w/2,h/2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):
        # Sample every second pixel at four phase offsets, stack on channels.
        patches = [x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]]
        return self.conv(torch.cat(patches, 1))
class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        # Concatenation axis, fixed at construction time.
        self.d = dimension

    def forward(self, x):
        tensors = x
        return torch.cat(tensors, self.d)
class Flatten(nn.Module):
    """Use after nn.AdaptiveAvgPool2d(1) to drop the last two dimensions."""

    @staticmethod
    def forward(x):
        batch = x.size(0)
        return x.view(batch, -1)
class Classify(nn.Module):
    """Classification head: x(b,c1,20,20) -> x(b,c2) via pool + 1x1 conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  # to x(b,c2,1,1)
        self.flat = Flatten()

    def forward(self, x):
        # Accept a tensor or a list of tensors; pool each and cat on channels.
        inputs = x if isinstance(x, list) else [x]
        pooled = [self.aap(t) for t in inputs]
        z = torch.cat(pooled, 1)
        return self.flat(self.conv(z))  # flatten to x(b,c2)
|
[
"[email protected]"
] | |
2efc75247312dda6a4b3a75b13341709c8291fe0
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R4/benchmark/startPyquil304.py
|
bbb13e55f149de899519a4a930fcac157e7752b1
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,220 |
py
|
# qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
# Module-level QVM connection (import-time side effect).
# NOTE(review): appears unused below — the __main__ block uses get_qc() instead.
conn = QVMConnection()
def make_circuit() -> Program:
    """Build the 13-gate, 4-qubit program (original gate order preserved)."""
    gate_sequence = [
        H(0),        # number=1
        H(1),        # number=2
        H(2),        # number=3
        CNOT(0, 2),  # number=7
        X(2),        # number=8
        CNOT(0, 2),  # number=9
        CNOT(3, 1),  # number=10
        H(3),        # number=4
        Y(3),        # number=5
        X(0),        # number=11
        X(0),        # number=12
    ]
    prog = Program()  # circuit begin
    for gate in gate_sequence:
        prog += gate
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally occurrences of each measured bitstring (hand-rolled Counter)."""
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Build the circuit, run it on a 4-qubit QVM, and write the bitstring
    # histogram to a CSV next to the data directory.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)  # 1024 shots; maps qubit -> per-shot array
    # Stack per-qubit measurement arrays and transpose to per-shot rows.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil304.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
95e1ea6aacd9e0b2bfe38b9c9de93cfd60182a95
|
51108a50ffb48ad154f587c230045bb783f22240
|
/bfgame/factories/recipes/base.py
|
f4d2fbb68a4894ed9e44bdc747ab6a6932072734
|
[
"MIT"
] |
permissive
|
ChrisLR/BasicDungeonRL
|
c90bd0866c457557cccbad24e14689d5d6db7b00
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
refs/heads/master
| 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 |
MIT
| 2019-08-05T16:28:23 | 2017-09-20T21:35:19 |
Python
|
UTF-8
|
Python
| false | false | 161 |
py
|
class Recipe(object):
    """Base class for object-construction recipes.

    Subclasses override the class attributes and build_components().
    """
    # Human-readable recipe identifier; subclasses set a non-empty name.
    name = ""
    # Object type this recipe produces; None in the abstract base.
    base_object_type = None
    # Other recipes that must be built first.
    # NOTE(review): mutable class-level list is shared by subclasses that do
    # not override it — fine while it is treated as read-only.
    depends_on = []

    @staticmethod
    def build_components(object_type, game):
        # Hook for subclasses to attach components; base does nothing.
        pass
|
[
"[email protected]"
] | |
94912c9ed339cdf676610f0ca9397675dcf1e0ec
|
f9a8ee37334771f37edda863db08a7dcccc9522f
|
/AtCoder/Contest/ABC/ZONeエナジー プログラミングコンテスト/abc200E.py
|
dc144abd33d75de438010a0aa9fffe4eff052492
|
[] |
no_license
|
shimmee/competitive-programming
|
25b008ee225858b7b208c3f3ca7681e33f6c0190
|
894f0b7d557d6997789af3fcf91fe65a33619080
|
refs/heads/master
| 2023-06-07T13:07:17.850769 | 2021-07-05T17:20:47 | 2021-07-05T17:20:47 | 331,076,780 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 529 |
py
|
# ZONeエナジー プログラミングコンテスト E
# URL:
# Date:
# ---------- Ideas ----------
#
# ------------------- Solution --------------------
#
# ------------------- Answer --------------------
#code:python
# ------------------ Sample Input -------------------
# ----------------- Length of time ------------------
#
# -------------- Editorial / my impression -------------
#
# ----------------- Category ------------------
#AtCoder
#AC_with_editorial #解説AC
#wanna_review #hard復習 #復習したい
|
[
"[email protected]"
] | |
c723f5fdff701d3e8e5da3916b313407906b7a1e
|
377e3a552fb807febc18ce036af77edbce93ca19
|
/searching algo/exponential_search.py
|
776eff5ca5b6589ddfd1b7d6f43e0a9e8c67c45e
|
[] |
no_license
|
souravs17031999/100dayscodingchallenge
|
940eb9b6d6037be4fc0dd5605f9f808614085bd9
|
d05966f3e6875a5ec5a8870b9d2627be570d18d9
|
refs/heads/master
| 2022-10-29T11:05:46.762554 | 2022-09-28T13:04:32 | 2022-09-28T13:04:32 | 215,993,823 | 44 | 12 | null | 2022-08-18T14:58:50 | 2019-10-18T09:55:03 |
Python
|
UTF-8
|
Python
| false | false | 1,882 |
py
|
# Program to search the element using exponential search algorithm
# IDEA: logic of the algorithm is to use the fact that if we are able to find the bounds of the answer where it may lie
# and then using binary search algorithm because that range is already ordered, and we just need to check our answer (if it actually exists)
# TIME : 0(lg(i)) where i is the index of the element to be existence (if it is in the list), assuming the list is already sorted (in comparison to binary
# search , it is much faster especially in case if the key is near to the first element)
def binary_search(l, start, end, key):
    """Iterative binary search for key within sorted l[start:end+1].

    Returns the index of key, or -1 if it is not present in that range.
    """
    while start <= end:
        middle = (start + end) // 2
        if key < l[middle]:
            end = middle - 1
        elif key > l[middle]:
            start = middle + 1
        else:
            return middle
    return -1


# function to implement exponential search
def exponential_search(arr, key):
    """Search sorted arr for key in O(lg i), i being key's index if present.

    Doubles an index until arr[i] exceeds key, so [i//2, i] must bracket the
    answer, then binary-searches that range. Returns the index or -1.
    """
    # Robustness: an empty list has nothing to find (arr[0] would raise).
    if not arr:
        return -1
    # base case
    if arr[0] == key:
        return 0
    # starting with 1th index
    i = 1
    n = len(arr)
    # Double i while the current element is still <= key; the invariant
    # i < n - 1 keeps the probe inside the list.
    while i < n - 1 and arr[i] <= key:
        i *= 2
    # BUG FIX: i can overshoot the last index (e.g. n=6 doubles i from 4 to
    # 8), and the original passed it straight to binary_search, which then
    # indexed out of range; clamp the upper bound to n - 1.
    # Also removed a stray debug print(i) from the loop.
    return binary_search(arr, i // 2, min(i, n - 1), key)
# driver function
if __name__ == '__main__':
    # Demo: search a sorted list for a known key and report the position.
    numbers = [2, 3, 4, 10, 20]
    target = 10
    position = exponential_search(numbers, target)
    if position == -1:
        print("element not found !")
    else:
        print(f"element found at : {position}")
|
[
"[email protected]"
] | |
ff4e0d6e7c0b10941ced2e6a74ccfc027bb1206b
|
b50f8de2f35858f866b8f7d54da2994e5b59a391
|
/src/dataload/sources/entrez/entrez_genomic_pos.py
|
839f8410cfc258609ad333cca65c85f34f67dca0
|
[
"Apache-2.0"
] |
permissive
|
SuLab/mygene.info
|
455127c4e0bcae61eb36d76496dfc4139be0f584
|
506d7b1d2a7e4de55bdebba8671dc8a09fc303b2
|
refs/heads/master
| 2020-06-03T11:27:34.021692 | 2017-06-12T20:58:45 | 2017-06-12T20:58:45 | 54,933,630 | 20 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,148 |
py
|
'''
Populates MICROBE gene entries with genomic position data
Currently updates the 120 microbial taxids that are NCBI Reference Sequences
run get_ref_microbe_taxids function to get an updated file for TAXIDS_FILE
when it's necessary.
'''
import os.path
from biothings.utils.common import (dump, loadobj, get_timestamp)
from utils.dataload import (tab2list, load_start, load_done)
from dataload import get_data_folder
# Resolve the entrez data directory from the dataload configuration.
DATA_FOLDER = get_data_folder('entrez')
print('DATA_FOLDER: ' + DATA_FOLDER)
# Target MongoDB collection for this loader.
__metadata__ = {
    '__collection__': 'entrez_genomic_pos',
}
# Pre-built list of reference-microbe taxids (see get_ref_microbe_taxids()).
TAXIDS_FILE = os.path.join(DATA_FOLDER, "../ref_microbe_taxids.pyobj")
DATAFILE = os.path.join(DATA_FOLDER, 'gene/gene2refseq.gz')
def load_genedoc(self):
    """
    Loads gene data from NCBI's refseq2gene.gz file.
    Parses it based on genomic position data and refseq status provided by the
    list of taxids from get_ref_microbe_taxids() as lookup table.

    Yields one {'_id', 'genomic_pos': {...}} dict per gene; consecutive rows
    sharing a gene id are collapsed to the first occurrence.
    :return: generator of gene documents
    """
    taxids = loadobj(TAXIDS_FILE)
    taxid_set = set(taxids)
    load_start(DATAFILE)
    def _includefn(ld):
        return ld[0] in taxid_set  # match taxid from taxid_set
    cols_included = [0, 1, 7, 9, 10, 11]  # 0-based col idx
    # NOTE(review): after projection the row appears to be
    # [taxid, gene_id, genomic_accession, start, end, orientation] — confirm
    # against the gene2refseq column layout.
    gene2genomic_pos_li = tab2list(DATAFILE, cols_included, header=1,
                                   includefn=_includefn)
    count = 0
    last_id = None
    for gene in gene2genomic_pos_li:
        count += 1
        # '+' maps to strand 1; anything else (e.g. '-') maps to -1.
        strand = 1 if gene[5] == '+' else -1
        _id = gene[1]
        mgi_dict = {
            '_id': _id,
            'genomic_pos': {
                'start': int(gene[3]),
                'end': int(gene[4]),
                'chr': gene[2],
                'strand': strand
            }
        }
        if _id != last_id:
            # rows with dup _id will be skipped
            yield mgi_dict
            last_id = _id
    load_done('[%d]' % count)
def get_mapping(self):
    """Return the Elasticsearch mapping for the genomic_pos nested field."""
    # strand is stored but not indexed (legacy ES "index": "no" syntax).
    strand_field = {
        "type": "byte",
        "index": "no"
    }
    properties = {
        "chr": {"type": "string"},
        "start": {"type": "long"},
        "end": {"type": "long"},
        "strand": strand_field,
    }
    return {
        "genomic_pos": {
            "dynamic": False,
            "type": "nested",
            "properties": properties,
        },
    }
def get_ref_microbe_taxids():
    """
    Downloads the latest bacterial genome assembly summary from the NCBI genome
    ftp site and generates a list of taxids of the bacterial reference genomes.

    Dumps the list to ref_microbe_taxids_<timestamp>.pyobj and returns it.
    :return: list of taxid strings
    """
    import urllib.request
    import csv
    urlbase = 'ftp://ftp.ncbi.nlm.nih.gov'
    urlextension = '/genomes/refseq/bacteria/assembly_summary.txt'
    assembly = urllib.request.urlopen(urlbase + urlextension)
    datareader = csv.reader(assembly.read().decode().splitlines(), delimiter="\t")
    taxid = []
    for row in datareader:
        # FIX: skip blank lines and ALL comment/header lines. The original
        # condition (len(row) == 1 and startswith("#")) missed the tab-split
        # header "#assembly_accession\t..." which has many fields and fell
        # through to the column checks below.
        if not row or row[0].startswith("#"):
            continue
        # Guard against malformed short rows before indexing columns 4 and 5.
        if len(row) > 5 and row[4] in ['reference genome', 'representative genome']:
            taxid.append(row[5])
    ts = get_timestamp()
    dump(taxid, "ref_microbe_taxids_{}.pyobj".format(ts))
    return taxid
|
[
"[email protected]"
] | |
0ae246e21eb23160ee3be8dc5060109d11903209
|
26f862c5f17fd97beb38be35b4b5937673929c9b
|
/swagger_client/models/system_object.py
|
f2f7c5ffd1642cfd9026a3adcb69acada10676a8
|
[] |
no_license
|
m-wendt/swagger-client
|
bf146841fa4e7eb6add01c09822eb01d89defa5e
|
2db96983a900dbb1f5d32c5e66d190e5c0d9b3dc
|
refs/heads/master
| 2020-11-25T22:06:23.487954 | 2019-12-18T15:56:21 | 2019-12-18T15:56:21 | 228,865,549 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,441 |
py
|
# coding: utf-8
"""
Save.TV API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SystemObject(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty model: no declared attributes.
    swagger_types = {}
    attribute_map = {}

    def __init__(self):  # noqa: E501
        """SystemObject - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models inside lists/dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr, _ in six.iteritems(self.swagger_types)}
        # Generated boilerplate: merge dict items when the model subclasses dict.
        if issubclass(SystemObject, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SystemObject):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
cd1b616721d53514d80440788a48f49edc7432fc
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractMayomtlHomeBlog.py
|
b24bf15613f06fa3f5fec756e2f050bb98d368d9
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 |
BSD-3-Clause
| 2023-09-11T15:48:15 | 2015-07-24T04:30:43 |
Python
|
UTF-8
|
Python
| false | false | 938 |
py
|
def extractMayomtlHomeBlog(item):
    """Parser for 'mayomtl.home.blog' release-feed items.

    Returns a release message for recognized series tags, None for previews
    or items without a volume/chapter, False for unrecognized tags.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if "preview" in title.lower() or not (chp or vol):
        return None
    known_tags = [
        ('My Lover Was Stolen, And I Was Kicked Out Of The Hero’s Party, But I Awakened To The EX Skill “Fixed Damage” And Became Invincible. Now, Let’s Begin Some Revenge',
         'My Lover Was Stolen, And I Was Kicked Out Of The Hero’s Party, But I Awakened To The EX Skill “Fixed Damage” And Became Invincible. Now, Let’s Begin Some Revenge', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
[
"[email protected]"
] | |
ebf9e16d0dc1998f35d44ba5017f92cdab150035
|
d77af24d09dc00a4b7d3e0bdc14b7d2727c96495
|
/RouToolPa/Tools/AsseblyQC/PurgeDups.py
|
68fc6d159a6e577e289bfe832a2558a7f6313423
|
[] |
no_license
|
mahajrod/RouToolPa
|
14ee0f7fce78c53e8639e770caa6ffb0dfd82fce
|
9b0cd0f0817a23cd3f37b3a55f83ce2d8abc71d8
|
refs/heads/master
| 2023-08-19T19:15:49.876175 | 2023-08-12T12:27:39 | 2023-08-12T12:27:39 | 181,844,151 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,532 |
py
|
#!/usr/bin/env python
from pathlib import Path
import pandas as pd
import numpy as np
from RouToolPa.Routines import MathRoutines
from RouToolPa.Tools.Abstract import Tool
class PurgeDups(Tool):
    """Post-processing helpers for purge_dups coverage and dups.bed output."""

    def __init__(self, path="", max_threads=4):
        # NOTE(review): the tool name passed here is "augustus" — looks copied
        # from another Tool subclass; confirm what Tool.__init__ expects.
        Tool.__init__(self, "augustus", path=path, max_threads=max_threads)

    def convert_coverage_file_to_bed(self, input_file, output_prefix):
        """Convert a per-base coverage file to BED and per-scaffold stats.

        The input is assumed to start with a ">scaffold length" header line
        followed by "start end depth" rows (new headers restart a scaffold).
        Writes <prefix>.bed, <prefix>.stat, <prefix>.len and returns the
        stats DataFrame (length, mean_cov, median_cov per scaffold).
        """
        length_dict = {}
        coverage_dict = {}
        mean_coverage_dict = {}
        median_coverage_dict = {}
        with self.metaopen(input_file, "r", buffering=100000000) as in_fd, \
                self.metaopen(output_prefix + ".bed", "w", buffering=100000000) as out_fd:
            # First line must be a ">scaffold length" header.
            scaffold, length = in_fd.readline()[1:].split()
            length_dict[scaffold] = int(length)
            coverage_dict[scaffold] = {}
            for line in in_fd:
                if line[0] == ">":
                    # New scaffold section.
                    scaffold, length = line[1:].split()
                    length_dict[scaffold] = int(length)
                    coverage_dict[scaffold] = {}
                    continue
                #print(line)
                value_list = list(map(int, line.strip().split()))
                value_list[0] -= 1  # convert to zero-based and half open coordinates
                out_fd.write("{0}\t{1}\n".format(scaffold, "\t".join(map(str, value_list))))
                #print(value_list)
                # Accumulate a depth -> covered-bases histogram per scaffold.
                if value_list[-1] not in coverage_dict[scaffold]:
                    coverage_dict[scaffold][value_list[-1]] = value_list[1] - value_list[0]
                else:
                    coverage_dict[scaffold][value_list[-1]] += value_list[1] - value_list[0]
        for scaffold in coverage_dict:
            median_coverage_dict[scaffold] = MathRoutines.median_from_dict(coverage_dict[scaffold])
            mean_coverage_dict[scaffold] = MathRoutines.mean_from_dict(coverage_dict[scaffold])
        # Scaffolds sorted by length, longest first.
        stat_df = pd.DataFrame.from_dict(length_dict, columns=["length", ], orient='index').sort_values(by=["length"], ascending=False)
        stat_df.index.name = "scaffold"
        stat_df["mean_cov"] = pd.Series(mean_coverage_dict)
        stat_df["median_cov"] = pd.Series(median_coverage_dict)
        stat_df.to_csv(output_prefix + ".stat", sep="\t", header=False, index=True)
        stat_df[["length"]].to_csv(output_prefix + ".len", sep="\t", header=False, index=True)
        return stat_df

    def add_lengths_to_dups_bed(self, input_file, length_file, output_file):
        """Annotate a purge_dups dups.bed with lengths and overlap fractions.

        length_file may be a path or an already-loaded DataFrame indexed by
        scaffold. Writes a commented header plus the annotated table to
        output_file and returns the annotated DataFrame.
        """
        if isinstance(length_file, (str, Path)):
            length_df = pd.read_csv(length_file, sep="\t", header=None, index_col=0, names=["scaffold", "length"])
        else:
            length_df = length_file
        dups_bed_df = pd.read_csv(input_file, sep="\t", header=None, index_col=0, names=["scaffold", "start", "end", "type", "overlapping_scaffold"])
        dups_bed_df["overlap_len"] = dups_bed_df["end"] - dups_bed_df["start"]
        # Index-aligned join on the scaffold index.
        dups_bed_df["scaffold_len"] = length_df["length"]
        dups_bed_df["overlapping_scaffold_len"] = dups_bed_df["overlapping_scaffold"].apply(lambda s: length_df.loc[s, "length"])
        dups_bed_df["overlap_faction"] = dups_bed_df["overlap_len"] / dups_bed_df["scaffold_len"]
        dups_bed_df["overlap_faction_overlapping_scaffold"] = dups_bed_df["overlap_len"] / dups_bed_df["overlapping_scaffold_len"]
        def count_fraction(df):
            # Fraction of a scaffold covered by the union of its intervals:
            # sort by (start, end), merge overlapping intervals, sum lengths.
            scaffold_len = df["scaffold_len"].iloc[0]
            sorted_df = df[["start", "end"]].sort_values(by=["start", "end"])
            fraction_df = [list(sorted_df.iloc[0])]
            for row in sorted_df.itertuples(index=False):
                if row[0] <= fraction_df[-1][1]:
                    # Overlaps the last merged interval — extend it if needed.
                    if row[1] > fraction_df[-1][1]:
                        fraction_df[-1][1] = row[1]
                else:
                    fraction_df.append(list(row))
            fraction_df = pd.DataFrame(fraction_df, columns=["start", "end"])
            fraction_df["fraction"] = (fraction_df["end"] - fraction_df["start"]) / scaffold_len
            return sum(fraction_df["fraction"])
        haplo_fraction_df = dups_bed_df[["start", "end", "scaffold_len"]].groupby(by='scaffold').apply(count_fraction)
        dups_bed_df["cumulative_overlap_fraction"] = haplo_fraction_df
        with open(output_file, "w") as out_fd:
            out_fd.write("#{0}\n".format("\t".join(["scaffold", "start", "end", "type", "overlapping_scaffold",
                                                    "overlap_len", "scaffold_len", "overlapping_scaffold_len",
                                                    "overlap_faction", "overlap_faction_overlapping_scaffold",
                                                    "cumulative_overlap_fraction"])))
            dups_bed_df.to_csv(out_fd, sep="\t", header=False, index=True, na_rep=".")
        #print(haplo_fraction_df)
        return dups_bed_df

    """
    def count_contig_fraction_in_haplotype(self, input_file_with_len, output_file):
        if isinstance(input_file_with_len, (str, Path)):
            haplo_df = pd.read_csv(input_file_with_len, sep="\t", header=None, index_col=0,
                                   names=["scaffold", "start", "end", "type", "overlapping_scaffold",
                                          "overlap_len", "scaffold_len", "overlapping_scaffold_len",
                                          "overlap_faction,%", "overlap_faction_overlapping_scaffold,%"])
        else:
            haplo_df = input_file_with_len
        print(haplo_df)
    """
|
[
"[email protected]"
] | |
a31e0c49cbba5e45a39a6fef1c35198454eac3b3
|
44a7473404d37a3f5c73cbcdf88be55564e580bb
|
/121_word-ladder-ii/word-ladder-ii.py
|
07104867d5e6cc1bf312c512b69bfb50106d1741
|
[] |
no_license
|
frankobe/lintcode
|
dbc10befc8055c55b2ca9716aa3dfa238b58aaa9
|
db131f968944b8140f07a8e5765fea55c72da6ba
|
refs/heads/master
| 2021-06-07T18:32:12.338879 | 2016-11-22T09:05:37 | 2016-11-22T09:05:37 | 20,018,761 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,264 |
py
|
# coding:utf-8
'''
@Copyright:LintCode
@Author: frankobe
@Problem: http://www.lintcode.com/problem/word-ladder-ii
@Language: Python
@Datetime: 15-09-10 00:37
'''
from string import ascii_lowercase
from collections import defaultdict
class Solution:
    # @param start, a string
    # @param end, a string
    # @param dict, a set of string
    # @return a list of lists of string
    def findLadders(self, start, end, dict):
        """BFS level-by-level recording each word's parents, then rebuild all
        shortest start->end ladders by walking parents back from `end`."""
        dict.add(end)
        frontier = {start}
        word_len = len(start)
        parents = defaultdict(set)
        # Expand one BFS level at a time until `end` has been discovered.
        while frontier and end not in parents:
            discovered = defaultdict(set)
            for word in frontier:
                for pos in range(word_len):
                    for letter in ascii_lowercase:
                        candidate = word[:pos] + letter + word[pos + 1:]
                        if candidate in dict and candidate not in parents:
                            discovered[candidate].add(word)
            frontier = discovered
            parents.update(discovered)
        # Backtrack: grow every path [.., end] leftward until it starts at start.
        ladders = [[end]]
        while ladders and ladders[0][0] != start:
            ladders = [[p] + path for path in ladders for p in parents[path[0]]]
        return ladders
|
[
"[email protected]"
] | |
3a867c97d04bc12c43529626104a44e5cde357d0
|
5982a9c9c9cb682ec9732f9eeb438b62c61f2e99
|
/Problem_131/my_solution.py
|
0957503ab542faeb851bc44ae52794dc24263800
|
[] |
no_license
|
chenshanghao/LeetCode_learning
|
6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c
|
acf2395f3b946054009d4543f2a13e83402323d3
|
refs/heads/master
| 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 787 |
py
|
class Solution(object):
    def partition(self, s):
        """
        :type s: str
        :rtype: List[List[str]]

        Return every way to split s into palindromic substrings.
        """
        self.result = []
        if len(s) == 0:
            return self.result
        self.backtrack([], 0, s)
        return self.result

    def IsPalindrome(self, string):
        # A string is a palindrome iff it equals its own reverse.
        return string == string[::-1]

    def backtrack(self, temp, start, s):
        # temp holds the palindromic pieces chosen so far for s[:start].
        if start >= len(s):
            self.result.append(temp[:])
        for cut in range(start + 1, len(s) + 1):
            piece = s[start:cut]
            if self.IsPalindrome(piece):
                temp.append(piece)
                self.backtrack(temp, cut, s)
                temp.pop()
|
[
"[email protected]"
] | |
e97d900e1e7624fde472f0927a9acdd56581b60c
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/216/26475/submittedfiles/testes.py
|
54f602251a4e6970b2390dc096dbe706d12bd81d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
#COMECE AQUI ABAIXO
#COMECE AQUI ABAIXO
# Read two numbers and print their arithmetic mean.
x = float(input('digite a'))
y = float(input('digite b'))
media = (x + y) / 2
print(media)
|
[
"[email protected]"
] | |
dc763b74c1dc61594084c2e1bd2444d4edaf96d4
|
9c0f691393abbeb5754e1624e0c48dfcdf857352
|
/2017/Helpers/day_02.py
|
ce9e1ba0627d0318c61d59f26c208b83c5be9430
|
[] |
no_license
|
seligman/aoc
|
d0aac62eda3e6adc3c96229ca859bd2274398187
|
9de27ff2e13100770a3afa4595b15565d45bb6bc
|
refs/heads/master
| 2023-04-02T16:45:19.032567 | 2023-03-22T15:05:33 | 2023-03-22T15:05:33 | 230,493,583 | 17 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,393 |
py
|
#!/usr/bin/env python3
import itertools
DAY_NUM = 2
DAY_DESC = 'Day 2: Corruption Checksum'
def calc(log, values):
    """Compute both Day-2 checksums over rows of whitespace-separated ints.

    Returns part 1 (sum of max-min per row); logs part 2 (sum of the quotient
    of each evenly-divisible pair per row) through `log`.
    """
    rows = [[int(token) for token in line.replace('\t', ' ').split(' ')] for line in values]
    part1 = 0
    part2 = 0
    for row in rows:
        part1 += max(row) - min(row)
        for a, b in itertools.combinations(row, 2):
            # Order the pair so a >= b before testing divisibility.
            if b > a:
                a, b = b, a
            if a % b == 0:
                part2 += a // b
    log("Second form: " + str(part2))
    return part1
def test(log):
    """Self-test against the part-1 example; True iff the checksum is 18."""
    values = [
        "5 1 9 5",
        "7 5 3",
        "2 4 6 8",
    ]
    # Idiom: return the comparison directly instead of if/else True/False.
    return calc(log, values) == 18
def run(log, values):
    # Harness entry point: log part 1's answer (calc logs part 2 itself).
    log(calc(log, values))
if __name__ == "__main__":
import sys, os
def find_input_file():
for fn in sys.argv[1:] + ["input.txt", f"day_{DAY_NUM:0d}_input.txt", f"day_{DAY_NUM:02d}_input.txt"]:
for dn in [[], ["Puzzles"], ["..", "Puzzles"]]:
cur = os.path.join(*(dn + [fn]))
if os.path.isfile(cur): return cur
fn = find_input_file()
if fn is None: print("Unable to find input file!\nSpecify filename on command line"); exit(1)
print(f"Using '{fn}' as input file:")
with open(fn) as f: values = [x.strip("\r\n") for x in f.readlines()]
print(f"Running day {DAY_DESC}:")
run(print, values)
|
[
"[email protected]"
] | |
6015c71d15294c4e5332fac46c344a18bee9eddd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/216/usersdata/354/113785/submittedfiles/av2_p3_civil.py
|
e7ed6d32a539f56ce9f534bf54484c4e125a9e91
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 546 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
# Reads an n x n matrix and a target cell (x, y); prints the sum of the
# target's row and column, excluding the target cell itself.
n = int(input('digite a dimensao n da matriz: '))
x = int(input('digite a linha do numero: '))
y = int(input('digite a coluna do numero: '))
matriz = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        matriz[i, j] = int(input('digite os valores da matriz: '))
# Row x without column y.
soma_linha = sum(matriz[x, j] for j in range(n) if j != y)
# Column y without row x.
soma_coluna = sum(matriz[i, y] for i in range(n) if i != x)
peso = soma_linha + soma_coluna
print(peso)
|
[
"[email protected]"
] | |
2bbc1196c0025f188016b75fc7993a434729f616
|
6af81c1e3853255f064ce58e848b34211decdd23
|
/test/top/api/rest/HotelsSearchRequest.py
|
f79b816b5737e298bbab342a8a4f1a00cb4bc2be
|
[] |
no_license
|
dacy413/TBAutoTool
|
d472445f54f0841f2cd461d48ec6181ae2182d92
|
ca7da4638d38dd58e38c680ee03aaccf575bce7b
|
refs/heads/master
| 2016-09-06T16:13:01.633177 | 2015-02-01T00:04:50 | 2015-02-01T00:04:50 | 29,625,228 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
'''
Created by auto_sdk on 2015-01-20 12:36:26
'''
from top.api.base import RestApi
class HotelsSearchRequest(RestApi):
    """TOP API request wrapper for taobao.hotels.search."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Searchable request fields, all optional (None = not sent).
        for field in ('city', 'country', 'district', 'domestic',
                      'name', 'page_no', 'province'):
            setattr(self, field, None)

    def getapiname(self):
        return 'taobao.hotels.search'
|
[
"[email protected]"
] | |
70c76db1ec07449c468c62369074bb65be67d7f4
|
7920ac571217d627aad1ed8fa0b87ef1436cdb28
|
/casepro/cases/migrations/0006_auto_20150508_0912.py
|
ba0c4ea6984a0959cd1e04d511e9dab37ee86a50
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/casepro
|
34777e5373822d41ff2e5f3995f86d009c2d1e7c
|
66177c00b06b2bd6e6cad2b648feb8f28f592add
|
refs/heads/main
| 2023-07-20T00:16:09.616516 | 2023-07-06T21:46:31 | 2023-07-06T21:46:31 | 32,147,348 | 23 | 30 |
BSD-3-Clause
| 2023-07-19T07:44:59 | 2015-03-13T09:31:47 |
Python
|
UTF-8
|
Python
| false | false | 1,662 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the CaseEvent model and
    # adjusts timestamp/choice fields on existing models.

    dependencies = [("cases", "0005_auto_20150424_1427")]

    operations = [
        migrations.CreateModel(
            name="CaseEvent",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                ("event", models.CharField(max_length=1, choices=[("R", "Contact replied")])),
                ("created_on", models.DateTimeField(db_index=True)),
            ],
        ),
        migrations.AlterField(
            model_name="case",
            name="opened_on",
            field=models.DateTimeField(help_text="When this case was opened", auto_now_add=True, db_index=True),
        ),
        migrations.AlterField(
            model_name="caseaction", name="created_on", field=models.DateTimeField(auto_now_add=True, db_index=True)
        ),
        migrations.AlterField(
            model_name="messageaction",
            name="action",
            field=models.CharField(
                max_length=1,
                choices=[
                    ("F", "Flag"),
                    ("N", "Un-flag"),
                    ("L", "Label"),
                    ("U", "Remove Label"),
                    ("A", "Archive"),
                    ("R", "Restore"),
                ],
            ),
        ),
        migrations.AddField(
            model_name="caseevent",
            name="case",
            field=models.ForeignKey(related_name="events", to="cases.Case", on_delete=models.PROTECT),
        ),
    ]
|
[
"[email protected]"
] | |
b453aed2c254c9389e6d16e6972bda279a7aa2b9
|
cf3891c6122d21584bb6d7ad81c41e26755c1083
|
/tests/gmprocess/subcommands/import_test.py
|
e189a580a0d4d0d4d0c4ed40a44128a0147e9ff5
|
[
"Unlicense",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
mmoschetti-usgs/groundmotion-processing
|
5cb6412eebe258dd3d30e085d68affc20553d744
|
944667e90b5a0a01f7017a676f60e2958b1eb902
|
refs/heads/master
| 2022-11-03T04:32:00.353837 | 2022-10-19T17:57:16 | 2022-10-19T18:37:23 | 186,485,732 | 0 | 0 |
NOASSERTION
| 2019-05-13T19:51:34 | 2019-05-13T19:51:34 | null |
UTF-8
|
Python
| false | false | 2,014 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import shutil
import pathlib
from gmprocess.utils import constants
def test_import(script_runner):
    """End-to-end test of the ``gmrecords import`` subcommand.

    Exercises three input forms: a CESMD zip file, a tar file of CWB
    data, and a plain directory of files. Cleans up the test profile
    and any event directories it created, even on failure.
    """
    try:
        # Need to create profile first.
        cdir = constants.CONFIG_PATH_TEST
        ddir = constants.TEST_DATA_DIR / "demo"
        idir = constants.TEST_DATA_DIR / "import"
        # Scripted answers for the interactive "projects" prompts.
        setup_inputs = io.StringIO(f"test\n{str(cdir)}\n{str(ddir)}\nname\[email protected]\n")
        ret = script_runner.run("gmrecords", "projects", "-c", stdin=setup_inputs)
        setup_inputs.close()
        assert ret.success

        # Test CESMD zip file
        zfile = idir / "cesmd_test.zip"
        ret = script_runner.run(
            "gmrecords", "import", "-e", "nn00725272", "-p", str(zfile)
        )
        print("*** stdout ***")
        print(ret.stdout)
        print("*** stderr ***")
        print(ret.stderr)
        assert ret.success
        raw_dir = ddir / "nn00725272" / "raw"
        assert raw_dir.is_dir()
        # The archive is expected to unpack into exactly 23 files.
        dst_files = list(pathlib.Path(raw_dir).glob("*"))
        assert len(dst_files) == 23

        # Test tar file of CWB data
        tfile = idir / "test.tar.zip"
        ret = script_runner.run(
            "gmrecords", "import", "-e", "us6000e2mt", "-p", str(tfile)
        )
        assert ret.success
        raw_dir = ddir / "us6000e2mt" / "raw"
        assert raw_dir.is_dir()
        dst_dats = list(raw_dir.glob("*.dat"))
        assert len(dst_dats) == 19

        # Test directory of files
        dpath = idir / "dir"
        ret = script_runner.run(
            "gmrecords", "import", "-e", "us6000e2mt", "-p", str(dpath)
        )
        assert ret.success
    except Exception as ex:
        raise ex
    finally:
        # Best-effort cleanup; ignore_errors so a failed step above does
        # not mask the original assertion error.
        shutil.rmtree(str(constants.CONFIG_PATH_TEST), ignore_errors=True)
        # Remove created files
        events = ["us6000e2mt", "nn00725272"]
        for eid in events:
            shutil.rmtree(str(ddir / eid), ignore_errors=True)


if __name__ == "__main__":
    test_import()
|
[
"[email protected]"
] | |
89302cc74ca6ac2bdca46b282f61fee632281c3a
|
ad02587a87ec19658d6a53bcf2a2f5e92149e7f4
|
/django-stubs/core/serializers/__init__.pyi
|
fcc124753a89fb1b8460527fcb732507dc4e7f9c
|
[
"BSD-3-Clause"
] |
permissive
|
Naddiseo/django-stubs
|
32a944617aea5b0e2dc3b8ad4dfd191b9ca6198b
|
cff5ab463c911283a9c43a26a38cb7bd4deebbd5
|
refs/heads/master
| 2020-04-18T05:01:40.832084 | 2019-01-22T17:13:31 | 2019-01-22T17:13:31 | 167,261,510 | 0 | 0 |
BSD-3-Clause
| 2019-01-23T22:06:15 | 2019-01-23T22:06:15 | null |
UTF-8
|
Python
| false | false | 1,396 |
pyi
|
# Type stubs for django.core.serializers: serializer registration
# helpers plus the serialize/deserialize entry points.
from collections import OrderedDict
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union

from django.apps.config import AppConfig
from django.core.serializers.base import Serializer, Deserializer
from django.db.models.base import Model
from django.db.models.query import QuerySet

BUILTIN_SERIALIZERS: Any

class BadSerializer:
    # Placeholder registered when a serializer module failed to import;
    # presumably calling it surfaces the stored ImportError — confirm
    # against the implementation.
    internal_use_only: bool = ...
    exception: ModuleNotFoundError = ...
    def __init__(self, exception: ImportError) -> None: ...
    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...

def register_serializer(format: str, serializer_module: str, serializers: Optional[Dict[str, Any]] = ...) -> None: ...
def unregister_serializer(format: str) -> None: ...
def get_serializer(format: str) -> Union[Type[Serializer], BadSerializer]: ...
def get_serializer_formats() -> List[str]: ...
def get_public_serializer_formats() -> List[str]: ...
def get_deserializer(format: str) -> Union[Callable, Type[Deserializer]]: ...
def serialize(
    format: str, queryset: Union[Iterator[Any], List[Model], QuerySet], **options: Any
) -> Optional[Union[List[OrderedDict], bytes, str]]: ...
def deserialize(format: str, stream_or_string: Any, **options: Any) -> Union[Iterator[Any], Deserializer]: ...
def sort_dependencies(
    app_list: Union[List[Tuple[AppConfig, None]], List[Tuple[str, List[Type[Model]]]]]
) -> List[Type[Model]]: ...
|
[
"[email protected]"
] | |
7442cc095982c595c26f2dc4f1297cb96e53d1b1
|
c5f58af61e3577ded52acda210f4f664651b598c
|
/template/mmdetection/tools/inference.py
|
1c3be13bc08f24a5ff7a2139b02780c446855c27
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hojihun5516/object-detection-level2-cv-02
|
0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac
|
bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
|
refs/heads/master
| 2023-08-31T09:50:59.150971 | 2021-10-16T15:00:19 | 2021-10-16T15:00:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,490 |
py
|
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor
from mmdet.models import build_detector
import pandas as pd
from pandas import DataFrame
import numpy as np
from pycocotools.coco import COCO
def parse_args():
    """Parse command-line options for the inference run."""
    parser = argparse.ArgumentParser(description="MMDet test (and eval) a model")
    # Path of the training config (a copy also lives in the work dir).
    parser.add_argument("config", help="test config file path")
    # Directory holding the checkpoints and where outputs are written.
    parser.add_argument("--work_dir", help="the directory to save the file containing evaluation metrics")
    # Which checkpoint epoch to load (file name stem under work_dir).
    parser.add_argument("--epoch", default="latest", help="Checkpoint file's epoch")
    parser.add_argument("--show_score_thr", type=float, default=0.05, help="score threshold (default: 0.05)")
    return parser.parse_args()
def make_csv(output, cfg):
    """Convert raw detector output into the competition submission CSV.

    Each row holds one image's detections as a space-separated string of
    ``label score xmin ymin xmax ymax`` groups (trailing space preserved,
    matching the expected submission format), plus the image file name.
    The file is written to ``<work_dir>/submission.csv``.
    """
    prediction_strings = []
    file_names = []
    coco = COCO(cfg.data.test.ann_file)
    img_ids = coco.getImgIds()

    class_num = len(cfg.data.test.classes)
    for i, out in enumerate(output):
        image_info = coco.loadImgs(coco.getImgIds(imgIds=i))[0]
        # Collect tokens and join once instead of quadratic string "+=".
        tokens = []
        for j in range(class_num):
            for o in out[j]:
                tokens.extend(
                    [str(j), str(o[4]), str(o[0]), str(o[1]), str(o[2]), str(o[3])]
                )
        prediction_string = " ".join(tokens) + " " if tokens else ""
        prediction_strings.append(prediction_string)
        file_names.append(image_info["file_name"])

    submission = pd.DataFrame()
    submission["PredictionString"] = prediction_strings
    submission["image_id"] = file_names
    submission.to_csv(os.path.join(cfg.work_dir, "submission.csv"), index=None)
    print(f"submission.csv is saved in {cfg.work_dir}")
def main():
    """Run single-GPU inference over the test set and write the CSV."""
    args = parse_args()

    cfg = Config.fromfile(args.config)

    if args.work_dir:
        cfg.work_dir = args.work_dir

    # test_mode makes the dataset yield images without annotations.
    cfg.data.test.test_mode = True

    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False,
    )

    checkpoint_path = os.path.join(cfg.work_dir, f"{args.epoch}.pth")

    # build detector (no train_cfg needed at inference time)
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg"))

    # load checkpoint weights onto CPU first, then move to GPU 0
    checkpoint = load_checkpoint(model, checkpoint_path, map_location="cpu")

    model.CLASSES = dataset.CLASSES
    model = MMDataParallel(model.cuda(), device_ids=[0])

    # compute outputs and post-process them into the submission file
    output = single_gpu_test(model, data_loader, show_score_thr=args.show_score_thr)
    make_csv(output, cfg)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
0ffb1a2ee81207f529a86af9c5969f5b359151d8
|
92866897ac8b95067960f312aa92a4d02c7c81df
|
/environments/oc-p5/database.py
|
93f99ef3da8506942db150a6ad42cd3bace69117
|
[] |
no_license
|
DenisLamalis/cours-python
|
63fec725c038a50fd52f428152dbc1e0671dba53
|
1fc92b125969a2771633d6e8508138986163b6e7
|
refs/heads/master
| 2023-02-03T19:59:34.345181 | 2020-12-15T09:57:42 | 2020-12-15T09:57:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,841 |
py
|
import mysql.connector
from config import *
from mysql.connector import errorcode
from tables import Tables
class Database:
    """Creates the PureBeurre MySQL database, its tables and seed data."""

    def __init__(self):
        """Store the connection settings and the table definitions."""
        self.host = HOST
        self.user = USER
        self.password = PASSWORD
        self.db_name = 'PureBeurre'
        self.tables = Tables()
        # Live connection handle; set by connection(). Kept on a private
        # attribute so it does not shadow the connection() method — the
        # original assigned it to self.connection, which made every call
        # after the first raise TypeError.
        self._connection = None
        self.mycursor = None

    def connection(self):
        """Open a connection to the database and return a cursor (or None)."""
        try:
            self._connection = mysql.connector.connect(
                host=self.host,
                user=self.user,
                password=self.password,
                database=self.db_name)
            self.mycursor = self._connection.cursor()
            if self._connection.is_connected():
                print(f"REUSSITE : Connection à la base {self.db_name} effectuée.")
                return self.mycursor
        except mysql.connector.Error as error:
            print("ECHEC : impossible de me connecter, erreur : {}".format(error))

    def db_create(self):
        """Create the database itself (utf8 character set)."""
        # NOTE(review): connection() selects self.db_name, which must not
        # exist yet when this method is supposed to create it — confirm.
        mycursor = self.connection()
        try:
            mycursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(self.db_name))
            print(f"REUSSITE : création de la base {self.db_name} effectuée.")
        except mysql.connector.Error as err:
            print("ECHEC : impossible de créer la base, erreur : {}".format(err))
            exit(1)

    def tables_create(self):
        """Create every table declared in Tables.TABLES."""
        mycursor = self.connection()
        for table_name in self.tables.TABLES:
            table_description = self.tables.TABLES[table_name]
            try:
                mycursor.execute(table_description)
                print("REUSSITE : la création de la table {} est effectuée.\n".format(table_name), end='')
            except mysql.connector.Error as err:
                # Bug fix: the handler formatted an undefined name
                # ``error`` (NameError); the bound exception is ``err``.
                print("ECHEC : impossible de créer la table, erreur : {}".format(err))

    def load_nutriscore(self):
        """Insert the five Nutri-Score grades A..E (ids 1..5)."""
        self.connection()
        try:
            add_nutriscore = ("INSERT INTO nutriscore (nut_id, nut_type) VALUES (%s,%s)")
            # One parameterized insert per grade instead of five
            # copy-pasted blocks.
            for values in enumerate('ABCDE', start=1):
                self.mycursor.execute(add_nutriscore, values)
            self._connection.commit()
            print("Les différents Nutriscore ont été chargés dans la base.")
        except mysql.connector.Error as error:
            print("Erreur lors du chargement : {}".format(error))


if __name__ == "__main__":
    database = Database()
|
[
"[email protected]"
] | |
5812623a6b231e2bf8b445f6ffa642fcb04887cc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_328/ch37_2020_03_25_19_56_06_004731.py
|
24a51c11e2535e75fe75b9bbbcb27294953173b6
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
# Keep prompting until the user types the magic word "desisto".
senha = input("Digite aqui sua senha: ")
while senha != "desisto":
    senha = input("Digite aqui sua senha: ")
print("Você acertou a senha!")
|
[
"[email protected]"
] | |
165e7dc760f0dca474059f542342f73228ee2ee4
|
7aebfaec6957ad67523f1d8851856af88fb997a6
|
/catkin_ws/build/robotiq/robotiq_modbus_rtu/catkin_generated/pkg.develspace.context.pc.py
|
2fea7493bdaa8d6fad68cdd3e90a1c93c073d9a2
|
[] |
no_license
|
k-makihara/ROS
|
918e79e521999085ab628b6bf27ec28a51a8ab87
|
45b60e0488a5ff1e3d8f1ca09bfd191dbf8c0508
|
refs/heads/master
| 2023-01-28T06:00:55.943392 | 2020-11-26T05:27:16 | 2020-11-26T05:27:16 | 316,127,707 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Build-time package context for the robotiq_modbus_rtu catkin package,
# consumed by catkin's pkg-config generation. Do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_modbus_rtu"
PROJECT_SPACE_DIR = "/home/mslab/catkin_ws/devel"
PROJECT_VERSION = "1.0.0"
|
[
"[email protected]"
] | |
f3d1de23c937418d9d66cee322518ae815b1b97d
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/1001-1500/1354.Construct Target Array With Multiple Sums.2.py
|
937d34cc801eb03e13440f0ce5a28b99a2374341
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 |
Python
|
UTF-8
|
Python
| false | false | 640 |
py
|
import heapq
class Solution(object):
    """LeetCode 1354: decide whether ``target`` can be produced from an
    all-ones array by repeatedly replacing one element with the current
    array sum, by undoing those operations backwards from the target."""

    def isPossible(self, target):
        """
        :type target: List[int]
        :rtype: bool
        """
        total = sum(target)
        heap = [-value for value in target]
        heapq.heapify(heap)
        # Repeatedly undo the last replacement on the current maximum.
        while heap[0] != -1:
            largest = -heapq.heappop(heap)
            second = -heap[0] if heap else 0
            # Value the maximum held before its last replacement.
            previous = largest * 2 - total
            step = largest - previous
            if largest == second or step == 0:
                return False
            # Modular shortcut: undo many identical steps at once.
            previous = second + (largest - second) % -step
            if previous < 1:
                return False
            total -= largest - previous
            heapq.heappush(heap, -previous)
        return True
|
[
"[email protected]"
] | |
bcb2d3d5b2956afcdde5f3be634d6e0742748d87
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02233/s835389106.py
|
94c01a979ff6a34046d86dab98703089af7bc21b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 140 |
py
|
def _fib(index):
    """Return the Fibonacci number F(index) with F(0) = F(1) = 1.

    Iterative with O(1) memory, so it works for any non-negative index;
    the original pre-filled a fixed 45-entry table and raised IndexError
    for n >= 45.
    """
    current, following = 1, 1
    for _ in range(index):
        current, following = following, current + following
    return current


n = int(input())
print(_fib(n))
|
[
"[email protected]"
] | |
5b5eb5cda0fba8e8594dfdd2a26512967a17d5b7
|
db861016e307fa7e1a57c1d07262b5d9c8051218
|
/cookbook/ingredients/migrations/0001_initial.py
|
91177488701b66d42e9147238624ca23682e9abb
|
[
"MIT"
] |
permissive
|
mugagambi/cookbook-graphql-server
|
794fedaf0d6c7fc5a7ffd21100d90c4f9ef16cba
|
d45044dc5e307d822e3338bcb3e4f8758c89a2f2
|
refs/heads/master
| 2021-01-25T14:33:55.992792 | 2018-03-03T17:18:52 | 2018-03-03T17:18:52 | 123,712,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,002 |
py
|
# Generated by Django 2.0.2 on 2018-03-03 13:19
# Auto-generated initial migration: creates Category and Ingredient,
# with Ingredient pointing at its Category. Do not edit by hand.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('notes', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='ingredients.Category')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
89502b0cd5d0b335a4a18aeb229341a774ad9d71
|
44d5b0a1f411ce14181f1bc8b09e3acbc800e9e1
|
/routes1.py
|
a1fbc536fb60722dd5222cad0edc061e93379366
|
[] |
no_license
|
mukosh123/Librarysysrtem
|
5d74988af1aaec31a007f5aaddd9d8e3855a7662
|
e74ed3328bc50336df28ec45fdf3775051407a27
|
refs/heads/master
| 2021-01-22T10:40:15.775179 | 2017-02-16T14:42:41 | 2017-02-16T14:42:41 | 82,023,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,825 |
py
|
from flask import *
import sqlite3

# SQLite file that stores the library catalogue.
DATABASE = 'books.db'

app = Flask(__name__)
app.config.from_object(__name__)  # picks up the upper-case settings above


def connect_db():
    # New connection per use; callers are responsible for closing it.
    return sqlite3.connect(app.config['DATABASE'])
@app.route('/admin')
def admin():
    """Admin dashboard: list every book (title + category)."""
    g.db = connect_db()
    cur = g.db.execute('select rep_title,category from reps')
    books = [dict(rep_title=row[0], category=row[1]) for row in cur.fetchall()]
    g.db.close()
    return render_template('admin.html', books=books)
@app.route('/userlogin', methods=['GET', 'POST'])
def userlogin():
    """User login form; redirects to the catalogue on success.

    Fixes two defects: the route did not accept POST, so the credential
    check was unreachable, and it used ``or`` so a single matching field
    sufficed — both fields must now match (mirroring login() below).
    """
    error = None
    if request.method == 'POST':
        if request.form['email'] == '[email protected]' and request.form['password'] == 'admin':
            return redirect(url_for('users'))
    return render_template('userlogin.html')
@app.route('/users')
def users():
    """Catalogue page shown to regular users: list all books."""
    g.db = connect_db()
    cur = g.db.execute('select rep_title,category from reps')
    books = [dict(rep_title=row[0], category=row[1]) for row in cur.fetchall()]
    g.db.close()
    return render_template('users.html', books=books)
@app.route('/borrow')
def borrow():
    """Borrowing page.

    NOTE(review): this route only registers GET (the Flask default), so
    the ``request.method == 'POST'`` branch is unreachable, and the
    function falls through and returns None (HTTP 500) on a plain GET or
    when no 'book' field is posted. It likely needs
    ``methods=['GET', 'POST']`` plus a fallback response — confirm intent.
    """
    if request.method == 'POST':
        if request.form['book']:
            g.db = connect_db()
            cur = g.db.execute('select rep_title,category from reps')
            books = [dict(rep_title=row[0], category=row[1]) for row in cur.fetchall()]
            g.db.close()
            return render_template('borrow.html', books=books)
@app.route('/', methods=['GET', 'POST'])
def login():
    """Admin login: on success go to /admin, otherwise re-show the form."""
    error = None
    if request.method == 'POST':
        if request.form['email'] != '[email protected]' or request.form['password'] != 'admin':
            # NOTE(review): ``error`` is set but never passed to the
            # template, so the message is never shown — confirm.
            error = 'Invalid credentials .please try again'
        else:
            return redirect(url_for('admin'))
    return render_template('login.html')


if __name__ == '__main__':
    app.run()
|
[
"admin"
] |
admin
|
b46eb8ad515541f7d2dca44fc8545ec091fa2544
|
726a548766a9db586806ef540dcf8ea4d0a82a60
|
/Python3/unit_testing/pytest/phonebook/phonebook.py
|
c9f4a4f2dcbac826ca6232de245f48fa455d4e4b
|
[] |
no_license
|
ArseniD/learn_python
|
6fd735a594ff83ea97888d6688e474e94182ea74
|
d73fc790514f50a2f61c5cc198073299b0c71277
|
refs/heads/master
| 2022-05-28T04:53:54.603475 | 2019-08-27T10:15:29 | 2019-08-27T10:15:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
import os
class Phonebook():
    """In-memory name -> number map backed by a cache file in *cachedir*."""

    def __init__(self, cachedir):
        self.entries = {}
        self.filename = "phonebook.txt"
        # Full path kept so clear() removes the file we actually created;
        # the original removed bare self.filename relative to the CWD,
        # which raised FileNotFoundError and leaked the real cache file.
        self.file_path = os.path.join(str(cachedir), self.filename)
        self.file_cache = open(self.file_path, "w")

    def add(self, name, number):
        """Add or overwrite an entry."""
        self.entries[name] = number

    def lookup(self, name):
        """Return the number for *name*; raises KeyError if unknown."""
        return self.entries[name]

    def names(self):
        """View of all stored names."""
        return self.entries.keys()

    def numbers(self):
        """View of all stored numbers."""
        return self.entries.values()

    def clear(self):
        """Drop all entries and delete the cache file."""
        self.entries = {}
        self.file_cache.close()
        os.remove(self.file_path)
|
[
"[email protected]"
] | |
d3ff1e8fd2b9310e9ac4b6e16b83c3b07946f17f
|
349c4f37b6a003d10dd78d864395a0d596d24fe6
|
/Learn_advanced/5201_container.py
|
2c7ae775dfcca224e76bba0ef1aea78bf35bbcbc
|
[] |
no_license
|
bwjubrother/Algorithms
|
55c2980a4540a7e48cb3afd298cbd2e3d81c594e
|
03daa2c778b1cc59ce1920363a27c88bec5ec289
|
refs/heads/master
| 2023-04-07T00:38:58.715786 | 2021-04-25T08:00:08 | 2021-04-25T08:00:08 | 279,016,520 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
import sys
sys.stdin = open('5201.txt', 'r')  # read the judge input from a file

T = int(input())
for tc in range(T):
    # n container weights (ns) and m truck capacities (ms).
    n, m = map(int, input().split())
    ns = list(map(int, input().split()))
    ms = list(map(int, input().split()))
    ans = 0
    # Greedy: match the heaviest remaining container with the largest
    # remaining truck; if it fits, load it and retire both, otherwise
    # the container can never be carried, so discard it.
    while ns and ms:
        if max(ns) <= max(ms):
            ans += max(ns)
            ns.remove(max(ns))
            ms.remove(max(ms))
        else:
            ns.remove(max(ns))
    print('#%d %d' % (tc+1, ans))
|
[
"[email protected]"
] | |
01b617e6e058ce0246a7c101cf43bf4e1c81a5c1
|
7b798a55cf7bd42ab5d2d423ab77814c2564bd44
|
/Easy/Longest Harmonious Subsequence.py
|
6c543dadc03fa60cf5efa587aed5009fc279b69a
|
[] |
no_license
|
addherbs/LeetCode
|
d933839eb0a2eb53c192f76c42152c6f3a6ef3f2
|
cadd48225d93aa69745a94a214e55e7751996e19
|
refs/heads/master
| 2021-04-15T05:12:26.855696 | 2021-02-27T05:53:42 | 2021-02-27T05:53:42 | 126,174,823 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
import collections
class Solution:
    def findLHS(self, nums):
        """Length of the longest harmonious subsequence, i.e. one whose
        maximum and minimum differ by exactly 1 (0 if none exists).

        Fixes the original's leftover per-element debug ``print`` and
        unused ``temp`` variable, and iterates distinct values instead of
        re-checking every duplicate element.

        :type nums: List[int]
        :rtype: int
        """
        counts = collections.Counter(nums)
        best = 0
        # Pair each distinct value with value+1 when both occur.
        for value, count in counts.items():
            if value + 1 in counts:
                best = max(best, count + counts[value + 1])
        return best
|
[
"[email protected]"
] | |
ac79892ea7a04e438b9f617893c0aeddfc3de5db
|
33daf4c69a8f46d7ad8d93eaa73fc60e36fd022d
|
/gestion/opos_2016/corregir_nombres.py
|
74f44cc7b0761081bb11028fe73577b9f2112e9e
|
[] |
no_license
|
OscarMaestre/estructurado
|
81cfc9412b77d5015be1bebf66785c357746d8e2
|
7649747e48128cb9c17dee937574e9490fcc9087
|
refs/heads/master
| 2021-01-10T15:05:47.695362 | 2016-04-28T07:30:50 | 2016-04-28T07:30:50 | 53,923,820 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,058 |
py
|
#!/usr/bin/env python3
#coding=utf-8

# Renames town names in the rutas.db SQLite database by piping UPDATE
# statements through the sqlite3 command-line tool.

from utilidades.ficheros.GestorFicheros import GestorFicheros

# (pueblo_antes, pueblo_como_debe_quedar) pairs applied to both the
# origin and destination columns of the rutas table.
modificaciones=[
    ("La Torre de Esteban Hambran", "Torre de Esteban Hambran"),
    ("Cortijos de Arriba", "Cortijo de Arriba"),
    ("Villafranca de los Caballeros", "Villafranca de los"),
    ("Las Ventas con Peña Aguilera", "Ventas Con Peña Aguilera"),
    ("Herrera de la Mancha", "Centro Penitenciario Herrera")
]

gf=GestorFicheros()

sql_modificar_origen="update rutas set origen='{0}' where origen='{1}';"
sql_modificar_destino="update rutas set destino='{0}' where destino='{1}';"
ARCHIVO_BD="rutas.db"
# NOTE(review): format() puts pueblo_antes into the SET clause and the
# "corrected" name into WHERE — if m[0] really is the *old* name the
# arguments look swapped; confirm which form is currently in the DB.
# Also, the SQL is built by string formatting and run through a shell
# echo: fine for this fixed list, unsafe for any user-supplied names.
for m in modificaciones:
    pueblo_antes=m[0]
    pueblo_como_debe_quedar=m[1]
    comando_sql_1=sql_modificar_origen.format (pueblo_antes, pueblo_como_debe_quedar)
    gf.ejecutar_comando(
        "echo \"" + comando_sql_1+ "\"", "| sqlite3 "+ARCHIVO_BD
    )
    comando_sql_2=sql_modificar_destino.format (pueblo_antes, pueblo_como_debe_quedar)
    gf.ejecutar_comando(
        "echo \"" + comando_sql_2+ "\"", "| sqlite3 "+ARCHIVO_BD
    )
|
[
"[email protected]"
] | |
9951868b35d55b8b9969caede6c4916b987b0f5c
|
e2f0806ca1cdd887ea40d050a19fa2710427bd38
|
/기본 문제/05주차_스택2/2167_2차원 배열의 합/강승훈.py
|
a590cd46cdd52cc0944208211c63cc1350c95b58
|
[] |
no_license
|
JY-Dev/AlgorithmStudy-1
|
001f94d80097c850c79eeb2bc86971a01aa5bd5d
|
2ad1df0fd65c72a6f6d1feeba09f889000ff8c15
|
refs/heads/main
| 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null |
UTF-8
|
Python
| false | false | 607 |
py
|
from sys import stdin

# BOJ 2167: answer sums of sub-rectangles of an n x m integer matrix.
# Input.
n, m = map(int, stdin.readline().split(" "))
arr = list(list(map(int, stdin.readline().split())) for _ in range(n))
test_case = int(stdin.readline().strip())

for _ in range(test_case):
    i1, j1, i2, j2 = map(int, stdin.readline().split(" "))  # rectangle corners (1-based, inclusive)
    sub_sum = 0  # accumulator for this query's answer
    # Sum every cell of the sub-rectangle; O(rows * cols) per query.
    for i in range(i1-1, i2):
        for j in range(j1-1, j2):
            sub_sum += arr[i][j]
    # Print one answer per query.
    print(sub_sum)
|
[
"[email protected]"
] | |
979a71a39688b941580d1480aaa2802ebc8058a2
|
2af94f8a7609d47fdcea28a2132c4f8bacb103e3
|
/src/services/service_manager.py
|
da578ee4f10fc46bdd44de29797f6e45099bc02f
|
[] |
no_license
|
bernhara/DigiGateway4Raph
|
685527723f0b306f387233c78d27fe9d78717c38
|
f36ba29ef883d70f94b8609ff734b5dcde786c66
|
refs/heads/master
| 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,464 |
py
|
############################################################################
# #
# Copyright (c)2008, 2009, Digi International (Digi). All Rights Reserved. #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation, without fee and without a signed licensing agreement, is #
# hereby granted, provided that the software is used on Digi products only #
# and that the software contain this copyright notice, and the following #
# two paragraphs appear in all copies, modifications, and distributions as #
# well. Contact Product Management, Digi International, Inc., 11001 Bren #
# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #
# opportunities for non-Digi products. #
# #
# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #
# PROVIDED HEREUNDER IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND. #
# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #
# ENHANCEMENTS, OR MODIFICATIONS. #
# #
# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #
# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #
# #
############################################################################
"""\
Manages the loading and instances of individual services.
The ServiceManager allows for the dynamic loading of services.
drivers as well as the ability to retrieve an instance of a service
by name.
The ServiceManager also provides an interface to start and stop
an instance as well as to query the instance for its configuration
parameters.
"""
# imports
from common.abstract_service_manager import AbstractServiceManager
# constants
# exception classes
# interface functions
# classes
class ServiceManager(AbstractServiceManager):
    """Concrete service manager: registers itself with the core under the
    name "service_manager" and loads service drivers from 'services'."""

    def __init__(self, core_services):
        self.__core = core_services
        # Registered before base-class init (order preserved from the
        # original; the base init may rely on the registration — confirm).
        self.__core.set_service("service_manager", self)

        # Initialize our base class:
        AbstractServiceManager.__init__(self, core_services, ('services',))

    def driver_load(self, name):
        """\
        Loads a service driver class dynamically.

        If the driver has not been loaded previously, an unconfigured
        instance of the driver will be created and managed by the
        ServiceManager.  If the driver has already been loaded
        nothing will be done.  In either case, this function will
        return True.

        If the service driver cannot be loaded for any reason, an
        exception will be raised.
        """
        # Thin delegation to the shared loading machinery.
        return AbstractServiceManager.service_load(self, name)
# internal functions & classes
|
[
"[email protected]"
] | |
39231c851e4390fefee972dc33794a199ac03564
|
589b5eedb71d83c15d44fedf60c8075542324370
|
/project/stock_project/alpha_model/alpha_factor/GrossProfitYOY.py
|
7132439c922d47498483410e22ffd1b56dbe32b9
|
[] |
no_license
|
rlcjj/quant
|
4c2be8a8686679ceb675660cb37fad554230e0d4
|
c07e8f0f6e1580ae29c78c1998a53774a15a67e1
|
refs/heads/master
| 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,813 |
py
|
import pandas as pd
import numpy as np
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.stock.stock_factor_operate import StockFactorOperate
def GrossProfitYOY(beg_date, end_date):
    """
    Factor: year-over-year growth of quarterly gross profit
    (operating income minus operating cost).
    Values are aligned to daily dates using the most recent
    financial-report disclosure date.
    """

    # param
    #################################################################################
    factor_name = 'GrossProfitYOY'
    ipo_num = 90  # NOTE(review): unused in this function — confirm intent.

    # read data
    #################################################################################
    income = Stock().get_factor_h5("OperatingIncome", None, "primary_mfc").T
    cost = Stock().get_factor_h5("OperatingCost", None, "primary_mfc").T
    [income, cost] = Stock().make_same_index_columns([income, cost])

    gross_profit = income - cost
    # YoY: compare against the same quarter one year (4 quarters) earlier.
    gross_profit_4 = gross_profit.shift(4)
    gross_profit_yoy = gross_profit / gross_profit_4 - 1.0
    gross_profit_yoy = gross_profit_yoy.T
    gross_profit_yoy = StockFactorOperate().change_quarter_to_daily_with_report_date(gross_profit_yoy, beg_date, end_date)

    # data precessing
    #################################################################################
    pass

    # calculate data daily
    #################################################################################
    # Drop dates where every stock is NaN.
    res = gross_profit_yoy.T.dropna(how='all').T

    # save data
    #############################################################################
    Stock().write_factor_h5(res, factor_name, "alpha_dfc")
    return res
    #############################################################################


if __name__ == '__main__':
    from datetime import datetime

    beg_date = '2004-01-01'
    end_date = datetime.today()
    data = GrossProfitYOY(beg_date, end_date)
    print(data)
|
[
"[email protected]"
] | |
1393b6316d1e0a3f66c872c6f21c11da41fbb9e9
|
ad715f9713dc5c6c570a5ac51a18b11932edf548
|
/tensorflow/lite/testing/op_tests/ceil.py
|
02d6ab3f76b56e09d806372a7e08eef7ec137d0a
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
rockzhuang/tensorflow
|
f1f31bc8edfa402b748c500efb97473c001bac95
|
cb40c060b36c6a75edfefbc4e5fc7ee720273e13
|
refs/heads/master
| 2022-11-08T20:41:36.735747 | 2022-10-21T01:45:52 | 2022-10-21T01:45:52 | 161,580,587 | 27 | 11 |
Apache-2.0
| 2019-01-23T11:00:44 | 2018-12-13T03:47:28 |
C++
|
UTF-8
|
Python
| false | false | 1,835 |
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for ceil."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_ceil_tests(options):
    """Make a set of tests to do ceil."""

    # Cartesian product of these parameters defines the generated cases.
    test_parameters = [{
        "input_dtype": [tf.float32],
        "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
    }]

    def build_graph(parameters):
        """Build the ceil op testing graph."""
        input_value = tf.compat.v1.placeholder(
            dtype=parameters["input_dtype"],
            name="input1",
            shape=parameters["input_shape"])
        out = tf.math.ceil(input_value)
        return [input_value], [out]

    def build_inputs(parameters, sess, inputs, outputs):
        # Random input matching the parameterized dtype/shape.
        input_value = create_tensor_data(parameters["input_dtype"],
                                         parameters["input_shape"])
        return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
[
"[email protected]"
] | |
a1ff4766613a6a27fc4395c92e158607ac3292fc
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-1/vse-naloge-brez-testov/DN4-Z-225.py
|
9583739c790cad2cec42909834833817e30398cc
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,597 |
py
|
# Tu pišite svoje funkcije:
from math import *
def koordinate(ime, kraji):
for kraj, x, y in kraji:
if kraj == ime:
return(x, y)
else:
return None
def razdalja_koordinat(x1, y1, x2, y2):
return sqrt((x1-x2) ** 2 + (y1-y2) ** 2)
def razdalja(ime1, ime2, kraji):
x1, y1 = koordinate(ime1, kraji)
x2, y2 = koordinate(ime2, kraji)
return razdalja_koordinat(x1, y1, x2, y2)
def v_dometu(ime, domet, kraji):
s = []
for mesto, x, y in kraji:
if mesto != ime:
if razdalja(ime, mesto, kraji) <= domet:
s.append(mesto)
return s
def najbolj_oddaljeni(ime, imena, kraji):
    """Name of the place from `imena` farthest from `ime` ('' when none)."""
    najdlje = 0
    zmagovalec = ''
    for kraj, _, _ in kraji:
        if kraj not in imena:
            continue
        odmik = razdalja(ime, kraj, kraji)
        # Strictly greater: ties keep the earlier candidate, as before.
        if odmik > najdlje:
            najdlje = odmik
            zmagovalec = kraj
    return zmagovalec
def zalijemo(ime, domet, kraji):
    """Place reachable within `domet` of `ime` that is farthest from it."""
    dosegljivi = v_dometu(ime, domet, kraji)
    return najbolj_oddaljeni(ime, dosegljivi, kraji)
def presek(s1, s2):
    """Elements common to both sequences, as a list (arbitrary order)."""
    return list(set(s1) & set(s2))
def skupno_zalivanje(ime1, ime2, domet, kraji):
    """Places that both `ime1` and `ime2` reach within `domet`.

    A source contributes its reachable places only when it itself appears
    in `kraji`; each source's own location counts as reachable (distance 0).
    """
    dosegljivi1 = []
    dosegljivi2 = []
    imena = [kraj for kraj, _, _ in kraji]
    if ime1 in imena:
        dosegljivi1 = [kraj for kraj, _, _ in kraji
                       if razdalja(kraj, ime1, kraji) <= domet]
    if ime2 in imena:
        dosegljivi2 = [kraj for kraj, _, _ in kraji
                       if razdalja(kraj, ime2, kraji) <= domet]
    return presek(dosegljivi1, dosegljivi2)
|
[
"[email protected]"
] | |
ce739380e97a96bf00fcdc9d4059e29f2e122645
|
099256b28df65fb7c90c077b060dca16b8655235
|
/unsupervised_learning/0x03-hyperparameter_tuning/2-gp.py
|
948f3c23d718cd4522f60f7ce711796142e5c0e1
|
[] |
no_license
|
Immaannn2222/holbertonschool-machine_learning
|
1cebb9a889b363669bed7645d102dc56ab943c08
|
80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d
|
refs/heads/master
| 2023-08-01T05:35:00.180472 | 2021-09-22T20:28:17 | 2021-09-22T20:28:17 | 317,624,526 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,373 |
py
|
#!/usr/bin/env python3
"""HYPERPARAÙETER"""
import numpy as np
class GaussianProcess:
    """A noiseless 1D Gaussian process with a squared-exponential kernel."""

    def __init__(self, X_init, Y_init, l=1, sigma_f=1):
        """Record the initial samples and hyperparameters.

        X_init, Y_init: column arrays of previously sampled inputs/outputs.
        l: kernel length-scale.
        sigma_f: signal standard deviation.
        """
        self.X = X_init
        self.Y = Y_init
        self.l = l
        self.sigma_f = sigma_f
        # Covariance of the training inputs with themselves.
        self.K = self.kernel(X_init, X_init)

    def kernel(self, X1, X2):
        """Squared-exponential (RBF) covariance matrix between X1 and X2."""
        sq_dist = (X1 - X2.T) ** 2
        return (self.sigma_f ** 2) * np.exp(sq_dist * (-0.5 / self.l ** 2))

    def predict(self, X_s):
        """Posterior mean and variance of the process at the points X_s.

        Returns (mu, s2): 1-D arrays with one entry per row of X_s.
        """
        chol = np.linalg.cholesky(self.kernel(self.X, self.X))
        # Solve the triangular system once; reuse it for mean and variance.
        v = np.linalg.solve(chol, self.kernel(self.X, X_s))
        mu = np.dot(v.T, np.linalg.solve(chol, self.Y)).reshape((
            X_s.shape[0],))
        s2 = np.diag(self.kernel(X_s, X_s)) - np.sum(v ** 2, axis=0)
        return mu, s2

    def update(self, X_new, Y_new):
        """Fold a newly sampled point into the training set and refresh K."""
        self.X = np.append(self.X, X_new).reshape(-1, 1)
        self.Y = np.append(self.Y, Y_new).reshape(-1, 1)
        self.K = self.kernel(self.X, self.X)
|
[
"[email protected]"
] | |
7d1bcd8e386680914a0800493669b944fd4b31b4
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/_BiomeFoundation.py
|
46963683901ead01d0776eb2ce541ab36ad30317
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 |
MIT
| 2023-02-26T21:34:04 | 2018-09-15T22:29:07 |
C
|
UTF-8
|
Python
| false | false | 324 |
py
|
"""
Classes from the 'BiomeFoundation' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
BMCoreAnalyticsEvents = _Class("BMCoreAnalyticsEvents")
|
[
"[email protected]"
] | |
1a113e39024e17830518e548d9edbf161cb4665c
|
6caab8d886e8bd302d1994ff663cf5ccb5e11522
|
/MyNotes_01/Step02/1-Data_Structure/day03/demo04_order_set.py
|
bea67a7c9c1c6c6023282873b66b421d9bb4c5d7
|
[] |
no_license
|
ZimingGuo/MyNotes01
|
7698941223c79ee754b17296b9984b731858b238
|
55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6
|
refs/heads/master
| 2022-07-30T21:30:32.100042 | 2020-05-19T16:59:09 | 2020-05-19T16:59:09 | 265,254,345 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,924 |
py
|
# author: Ziming Guo
# time: 2020/3/14
'''
Summary of three sorting routines: bubble sort, selection sort and insertion sort.
'''
# Bubble sort: two nested loops
def bubble(list_):
    """Sort `list_` in place, ascending, by pairwise compare-and-swap."""
    for i in range(len(list_) - 1):
        # Compare position i with every later position, swapping when needed.
        for j in range(i + 1, len(list_)):
            if list_[i] > list_[j]:
                list_[i], list_[j] = list_[j], list_[i]
# Selection sort
def seq_ord(list_target):
    """Sort `list_target` in place, ascending, via selection sort.

    Idea:
        1) find the smallest element among the remaining suffix
        2) swap it with the element at the front of that suffix
    :param list_target: list to sort
    """
    for pos in range(len(list_target) - 1):
        min_val = list_target[pos]
        min_idx = pos
        for j in range(pos + 1, len(list_target)):
            if list_target[j] < min_val:
                min_val = list_target[j]  # new minimum found, keep value
                min_idx = j               # ...and remember its index
        list_target[pos], list_target[min_idx] = min_val, list_target[pos]
# Insertion sort
def insert_ord(list_target):
    """Sort `list_target` in place, ascending, via insertion sort.

    Idea:
        1) take each element in turn and compare it backwards against the
           already-sorted prefix
        2) insert it just after the first smaller element (or at the very
           front when nothing is smaller)
    Note: this inserts rather than swaps.
    :param list_target: list to sort
    """
    for right in range(1, len(list_target)):
        current = list_target[right]
        for left in range(right - 1, -1, -1):
            if list_target[left] < current:
                # Insert after the first smaller element, then drop the
                # original copy (now shifted one slot to the right).
                list_target.insert(left + 1, current)
                del list_target[right + 1]
                break
            if left == 0:
                # Nothing smaller exists: the element belongs at the front.
                list_target.insert(0, current)
                del list_target[right + 1]
[
"[email protected]"
] | |
61b4cdb93612dde44672fc2ceda9f4c5e7e07d60
|
ae7f4a70a0bdb2e98d13c996c75d274241c25278
|
/basics/bubble_sort.py
|
ec8166badc0e61cd877a955c07c700c7d8f6268f
|
[
"MIT"
] |
permissive
|
zi-NaN/algorithm_exercise
|
5d17e1f6c3cae89ed3c7523b344e55c5a10e3e62
|
817916a62774145fe6387b715f76c5badbf99197
|
refs/heads/master
| 2020-03-30T12:00:46.694490 | 2019-06-23T11:04:34 | 2019-06-23T11:04:34 | 151,204,296 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 309 |
py
|
def bubble_sort(arr: 'list'):
    """Bubble-sort `arr` in place (ascending) and return it."""
    for _ in range(len(arr) - 1):
        # Sweep right-to-left so the smallest element bubbles to the front.
        for j in range(len(arr) - 1, 0, -1):
            if arr[j - 1] > arr[j]:
                arr[j - 1], arr[j] = arr[j], arr[j - 1]
    return arr
# Smoke test when run directly as a script.
if __name__ == '__main__':
    print(bubble_sort([1, 3, 2, 4]))
|
[
"[email protected]"
] | |
244d50cea282092239e50d4c7fae5eae2ae5d443
|
d2a564ee5ecc46ad55ba4a17504dd79b26f77d0f
|
/educa/courses/migrations/0002_content_file_image_text_video.py
|
672f9544b897995f2adb898129ce06f0b7bb6096
|
[] |
no_license
|
Da1anna/Educa
|
ab5eead0337a2447b87271a6a06c2bcfc61f09a2
|
736fd9840c66221212275f2cfa7374cb521e79ff
|
refs/heads/master
| 2022-12-30T12:31:36.014607 | 2020-10-15T03:52:49 | 2020-10-15T03:52:49 | 303,141,101 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,898 |
py
|
# Generated by Django 2.0.5 on 2020-09-18 14:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): auto-generated migration (Django 2.0.5 header above).
    # Only comments were added here; the operations must stay untouched so
    # the recorded migration history keeps matching applied databases.
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courses', '0001_initial'),
    ]
    operations = [
        # Generic container: links a course Module to any one of the four
        # concrete content types below via the contenttypes framework.
        migrations.CreateModel(
            name='Content',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                # limit_choices_to restricts the generic relation to the
                # four content models created in this same migration.
                ('content_type', models.ForeignKey(limit_choices_to={'model__in': ('text', 'image', 'file', 'video')}, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contents', to='courses.Module')),
            ],
        ),
        # File attachment content (uploads stored under 'files').
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.FileField(upload_to='files')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='file_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Image content (uploads stored under 'images').
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.ImageField(upload_to='images')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Plain-text content.
        migrations.CreateModel(
            name='Text',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.TextField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='text_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Video content referenced by URL (no upload).
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.URLField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='video_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"[email protected]"
] | |
96c732b39274d27dba371d3ba780deafa53399a0
|
8dbb2a3e2286c97b1baa3ee54210189f8470eb4d
|
/kubernetes-stubs/client/api/autoscaling_v1_api.pyi
|
483ed490e8ae7275b864b7af091f1df71f67ac70
|
[] |
no_license
|
foodpairing/kubernetes-stubs
|
e4b0f687254316e6f2954bacaa69ff898a88bde4
|
f510dc3d350ec998787f543a280dd619449b5445
|
refs/heads/master
| 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null |
UTF-8
|
Python
| false | false | 5,142 |
pyi
|
import typing
import kubernetes.client
class AutoscalingV1Api:
    """Typed stub (.pyi) of the Kubernetes autoscaling/v1 API client.

    Declares signatures only; every method mirrors the CRUD surface for
    HorizontalPodAutoscaler resources exposed by the generated client.
    """
    def __init__(
        self, api_client: typing.Optional[kubernetes.client.ApiClient] = ...
    ) -> None: ...
    def get_api_resources(self) -> kubernetes.client.V1APIResourceList: ...
    # --- list operations
    def list_horizontal_pod_autoscaler_for_all_namespaces(
        self,
        *,
        allow_watch_bookmarks: typing.Optional[bool] = ...,
        _continue: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        pretty: typing.Optional[str] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...,
        watch: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscalerList: ...
    def list_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        allow_watch_bookmarks: typing.Optional[bool] = ...,
        _continue: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...,
        watch: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscalerList: ...
    # --- create / delete operations
    def create_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def delete_collection_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        body: typing.Optional[kubernetes.client.V1DeleteOptions] = ...,
        _continue: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        grace_period_seconds: typing.Optional[int] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        orphan_dependents: typing.Optional[bool] = ...,
        propagation_policy: typing.Optional[str] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...
    ) -> kubernetes.client.V1Status: ...
    # --- single-object read / replace / delete / patch
    def read_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        exact: typing.Optional[bool] = ...,
        export: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def replace_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def delete_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        body: typing.Optional[kubernetes.client.V1DeleteOptions] = ...,
        dry_run: typing.Optional[str] = ...,
        grace_period_seconds: typing.Optional[int] = ...,
        orphan_dependents: typing.Optional[bool] = ...,
        propagation_policy: typing.Optional[str] = ...
    ) -> kubernetes.client.V1Status: ...
    def patch_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        body: typing.Any,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...,
        force: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    # --- /status subresource operations
    def read_namespaced_horizontal_pod_autoscaler_status(
        self, name: str, namespace: str, *, pretty: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def replace_namespaced_horizontal_pod_autoscaler_status(
        self,
        name: str,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def patch_namespaced_horizontal_pod_autoscaler_status(
        self,
        name: str,
        namespace: str,
        body: typing.Any,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...,
        force: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
|
[
"[email protected]"
] | |
0cbd1c8aeac8d787abd3ecf791a38ec0389941b3
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/privateticketsplugin/branches/0.10/privatetickets/report.py
|
e36569c4591e109ecf3d68f4f5d34b955ec69b4a
|
[] |
no_license
|
woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,095 |
py
|
from trac.core import *
from trac.web.api import IRequestFilter
from trac.ticket.report import ReportModule
from api import PrivateTicketsSystem
__all__ = ['PrivateTicketsReportFilter']
class PrivateTicketsReportFilter(Component):
    """Show only ticket the user is involved in in the reports."""
    implements(IRequestFilter)
    # IRequestFilter methods
    def pre_process_request(self, req, handler):
        """Deny raw (tab/csv) report exports to users lacking TICKET_VIEW."""
        if isinstance(handler, ReportModule) and \
                not req.perm.has_permission('TICKET_VIEW') and \
                req.args.get('format') in ('tab', 'csv'):
            raise TracError('Access denied')
        return handler
    def post_process_request(self, req, template, content_type):
        """Strip report rows for tickets the user may not access.

        Walks the ClearSilver HDF tree under 'report.items', removes
        entries whose ticket fails the access check, then compacts the
        remaining entries back into contiguous indices.
        """
        if req.args.get('DO_PRIVATETICKETS_FILTER') == 'report':
            # Walk the HDF
            fn = PrivateTicketsSystem(self.env).check_ticket_access
            deleted = []  # HDF child names whose ticket is inaccessible
            left = []     # HDF child names to keep
            node = req.hdf.getObj('report.items')
            if node is None:
                return template, content_type
            node = node.child()
            while node:
                i = node.name()
                id = req.hdf['report.items.%s.ticket'%i]
                if not fn(req, id):
                    deleted.append(i)
                else:
                    left.append(i)
                node = node.next()
            # Delete the needed subtrees
            for n in deleted:
                req.hdf.removeTree('report.items.%s'%n)
            # Recalculate this
            req.hdf['report.numrows'] = len(left)
            # Move the remaining items into their normal places
            # NOTE(review): `src` is a str (HDF node name) and `dest` an int,
            # so `src == dest` is always False and the copy always runs —
            # confirm this short-circuit was intended.
            for src, dest in zip(left, xrange(len(left)+len(deleted))):
                if src == dest: continue
                req.hdf.getObj('report.items').copy(str(dest), req.hdf.getObj('report.items.%s'%src))
            # Drop the now-duplicated tail entries beyond the kept range.
            for n in xrange(len(left), len(left)+len(deleted)):
                req.hdf.removeTree('report.items.%s'%n)
        return template, content_type
|
[
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] |
coderanger@7322e99d-02ea-0310-aa39-e9a107903beb
|
f17d60f3ba2d4ccd6446efee607a59d13b9f6596
|
b09db2bba8019b1d11720f1092304e5ce9948d91
|
/lib/sqlalchemy/util/__init__.py
|
273570357b09f600df0913bd840eed8f0a4f6efe
|
[
"MIT"
] |
permissive
|
theosotr/sqlalchemy
|
6da34f5e28859f4ae7479db4ca9963c8392d7ac8
|
e1d4e59116bbf1a12bb6b3f57d33ddfc757d4567
|
refs/heads/master
| 2022-10-17T08:42:31.757925 | 2020-06-11T03:14:46 | 2020-06-11T03:14:46 | 271,558,840 | 0 | 0 |
MIT
| 2020-06-11T13:51:28 | 2020-06-11T13:51:28 | null |
UTF-8
|
Python
| false | false | 6,629 |
py
|
# util/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import defaultdict # noqa
from contextlib import contextmanager # noqa
from functools import partial # noqa
from functools import update_wrapper # noqa
from ._collections import coerce_generator_arg # noqa
from ._collections import collections_abc # noqa
from ._collections import column_dict # noqa
from ._collections import column_set # noqa
from ._collections import EMPTY_SET # noqa
from ._collections import FacadeDict # noqa
from ._collections import flatten_iterator # noqa
from ._collections import has_dupes # noqa
from ._collections import has_intersection # noqa
from ._collections import IdentitySet # noqa
from ._collections import ImmutableContainer # noqa
from ._collections import immutabledict # noqa
from ._collections import ImmutableProperties # noqa
from ._collections import LRUCache # noqa
from ._collections import ordered_column_set # noqa
from ._collections import OrderedDict # noqa
from ._collections import OrderedIdentitySet # noqa
from ._collections import OrderedProperties # noqa
from ._collections import OrderedSet # noqa
from ._collections import PopulateDict # noqa
from ._collections import Properties # noqa
from ._collections import ScopedRegistry # noqa
from ._collections import ThreadLocalRegistry # noqa
from ._collections import to_column_set # noqa
from ._collections import to_list # noqa
from ._collections import to_set # noqa
from ._collections import unique_list # noqa
from ._collections import UniqueAppender # noqa
from ._collections import update_copy # noqa
from ._collections import WeakPopulateDict # noqa
from ._collections import WeakSequence # noqa
from .compat import b # noqa
from .compat import b64decode # noqa
from .compat import b64encode # noqa
from .compat import binary_type # noqa
from .compat import byte_buffer # noqa
from .compat import callable # noqa
from .compat import cmp # noqa
from .compat import cpython # noqa
from .compat import decode_backslashreplace # noqa
from .compat import dottedgetter # noqa
from .compat import has_refcount_gc # noqa
from .compat import inspect_getfullargspec # noqa
from .compat import int_types # noqa
from .compat import iterbytes # noqa
from .compat import itertools_filter # noqa
from .compat import itertools_filterfalse # noqa
from .compat import namedtuple # noqa
from .compat import next # noqa
from .compat import parse_qsl # noqa
from .compat import pickle # noqa
from .compat import print_ # noqa
from .compat import py2k # noqa
from .compat import py36 # noqa
from .compat import py37 # noqa
from .compat import py3k # noqa
from .compat import quote_plus # noqa
from .compat import raise_ # noqa
from .compat import raise_from_cause # noqa
from .compat import reduce # noqa
from .compat import reraise # noqa
from .compat import string_types # noqa
from .compat import StringIO # noqa
from .compat import text_type # noqa
from .compat import threading # noqa
from .compat import timezone # noqa
from .compat import TYPE_CHECKING # noqa
from .compat import u # noqa
from .compat import ue # noqa
from .compat import unquote # noqa
from .compat import unquote_plus # noqa
from .compat import win32 # noqa
from .compat import with_metaclass # noqa
from .compat import zip_longest # noqa
from .deprecations import deprecated # noqa
from .deprecations import deprecated_20 # noqa
from .deprecations import deprecated_20_cls # noqa
from .deprecations import deprecated_cls # noqa
from .deprecations import deprecated_params # noqa
from .deprecations import inject_docstring_text # noqa
from .deprecations import warn_deprecated # noqa
from .deprecations import warn_deprecated_20 # noqa
from .langhelpers import add_parameter_text # noqa
from .langhelpers import as_interface # noqa
from .langhelpers import asbool # noqa
from .langhelpers import asint # noqa
from .langhelpers import assert_arg_type # noqa
from .langhelpers import attrsetter # noqa
from .langhelpers import bool_or_str # noqa
from .langhelpers import chop_traceback # noqa
from .langhelpers import class_hierarchy # noqa
from .langhelpers import classproperty # noqa
from .langhelpers import clsname_as_plain_name # noqa
from .langhelpers import coerce_kw_type # noqa
from .langhelpers import constructor_copy # noqa
from .langhelpers import constructor_key # noqa
from .langhelpers import counter # noqa
from .langhelpers import decode_slice # noqa
from .langhelpers import decorator # noqa
from .langhelpers import dictlike_iteritems # noqa
from .langhelpers import duck_type_collection # noqa
from .langhelpers import ellipses_string # noqa
from .langhelpers import EnsureKWArgType # noqa
from .langhelpers import format_argspec_init # noqa
from .langhelpers import format_argspec_plus # noqa
from .langhelpers import generic_repr # noqa
from .langhelpers import get_callable_argspec # noqa
from .langhelpers import get_cls_kwargs # noqa
from .langhelpers import get_func_kwargs # noqa
from .langhelpers import getargspec_init # noqa
from .langhelpers import HasMemoized # noqa
from .langhelpers import hybridmethod # noqa
from .langhelpers import hybridproperty # noqa
from .langhelpers import iterate_attributes # noqa
from .langhelpers import map_bits # noqa
from .langhelpers import md5_hex # noqa
from .langhelpers import memoized_instancemethod # noqa
from .langhelpers import memoized_property # noqa
from .langhelpers import MemoizedSlots # noqa
from .langhelpers import methods_equivalent # noqa
from .langhelpers import monkeypatch_proxied_specials # noqa
from .langhelpers import NoneType # noqa
from .langhelpers import only_once # noqa
from .langhelpers import PluginLoader # noqa
from .langhelpers import portable_instancemethod # noqa
from .langhelpers import preload_module # noqa
from .langhelpers import preloaded # noqa
from .langhelpers import quoted_token_parser # noqa
from .langhelpers import safe_reraise # noqa
from .langhelpers import set_creation_order # noqa
from .langhelpers import string_or_unprintable # noqa
from .langhelpers import symbol # noqa
from .langhelpers import unbound_method_to_callable # noqa
from .langhelpers import warn # noqa
from .langhelpers import warn_exception # noqa
from .langhelpers import warn_limited # noqa
from .langhelpers import wrap_callable # noqa
SQLALCHEMY_WARN_20 = False
|
[
"[email protected]"
] | |
a4939c1fd486001c5569097c8d0b69969c4afcca
|
06c54acbc3d93601182170eef1c8f69396644003
|
/glTools-master/tools/mirrorDeformerWeights.py
|
fd416031993b524c6ae37273571ed212844d52a9
|
[] |
no_license
|
moChen0607/pubTool
|
bfb05b7ba763c325b871a60d1a690bd67d6ad888
|
16337badb6d1b4266f31008ceb17cfd70fec3623
|
refs/heads/master
| 2021-05-31T17:59:06.840382 | 2016-06-06T07:11:42 | 2016-06-06T07:11:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,666 |
py
|
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import maya.OpenMayaAnim as OpenMayaAnim
import glTools.utils.mesh
import glTools.utils.deformer
import glTools.tools.symmetryTable
class UserInputError(Exception):
    """Raised when a caller supplies missing or invalid scene objects."""
def mirrorWeights(mesh,deformer,axis='x',posToNeg=True,refMesh=''):
	'''
	Mirror deformer weights
	@param mesh: Mesh to mirror weights on
	@type mesh: str
	@param deformer: Deformer to mirror weights for
	@type deformer: str
	@param axis: Axis to mirror weights across
	@type axis: str
	@param posToNeg: Apply weight mirror from positive to negative vertices
	@type posToNeg: bool
	@param refMesh: Mesh used for symmetry reference
	@type refMesh: str
	'''
	# Check deformers
	if not mc.objExists(deformer):
		raise UserInputError('Deformer "'+deformer+'" does not exist!!')
	# Check refMesh
	# Default the symmetry reference to the deformed mesh itself.
	if not refMesh: refMesh = mesh
	# Get symmetry table
	# Map the axis letter to the index expected by buildSymTable.
	axisIndex = {'x':0,'y':1,'z':2}[axis]
	sTable = glTools.tools.symmetryTable.SymmetryTable()
	symTable = sTable.buildSymTable(refMesh,axisIndex)
	# Get current weights
	wt = glTools.utils.deformer.getWeights(deformer)
	mem = glTools.utils.deformer.getDeformerSetMemberIndices(deformer,mesh)
	# Mirror weights
	# posToNeg selects which side of the table drives the copy: the weight
	# at each source vertex overwrites its symmetric partner's weight.
	# Both the vertex and its partner must be deformer-set members.
	for i in [sTable.negativeIndexList,sTable.positiveIndexList][int(posToNeg)]:
		if mem.count(i) and mem.count(symTable[i]):
			wt[mem.index(symTable[i])] = wt[mem.index(i)]
	# Apply mirrored weights
	glTools.utils.deformer.setWeights(deformer,wt,mesh)
def flipWeights(mesh,sourceDeformer,targetDeformer='',axis='x',refMesh=''):
	'''
	Flip deformer weights
	@param mesh: Mesh to flip weights for
	@type mesh: str
	@param sourceDeformer: Deformer to query weights from
	@type sourceDeformer: str
	@param targetDeformer: Deformer to apply weights to (defaults to sourceDeformer)
	@type targetDeformer: str
	@param axis: Axis to flip weights across
	@type axis: str
	@param refMesh: Mesh used for symmetry reference
	@type refMesh: str
	'''
	# Check deformers
	if not mc.objExists(sourceDeformer):
		raise UserInputError('Source deformer '+sourceDeformer+' does not exist!!')
	if targetDeformer and not mc.objExists(targetDeformer):
		# Fixed 'Traget' typo in the original error message.
		raise UserInputError('Target deformer '+targetDeformer+' does not exist!!')
	if not targetDeformer:
		targetDeformer = sourceDeformer
	# Check refMesh
	if not refMesh: refMesh = mesh
	# Get mesh shape
	meshShape = mesh
	if mc.objectType(meshShape) == 'transform':
		meshShape = mc.listRelatives(mesh,s=True,ni=True)[0]
	# Get symmetry table
	axisIndex = {'x':0,'y':1,'z':2}[axis]
	# Bug fix: use glTools.tools.symmetryTable (imported at the top of this
	# module and used by mirrorWeights); the original referenced
	# glTools.common.symmetryTable, which is never imported here and raises
	# AttributeError at runtime.
	symTable = glTools.tools.symmetryTable.SymmetryTable().buildSymTable(refMesh,axisIndex)
	# Get current weights
	wt = glTools.utils.deformer.getWeights(sourceDeformer,mesh)
	sourceMem = glTools.utils.deformer.getDeformerSetMemberIndices(sourceDeformer,meshShape)
	targetMem = glTools.utils.deformer.getDeformerSetMemberIndices(targetDeformer,meshShape)
	targetWt = [0.0 for i in range(len(targetMem))]
	# Mirror weights: each source vertex's weight lands on its symmetric
	# partner in the target deformer's membership.
	for i in sourceMem:
		if targetMem.count(symTable[i]):
			try:
				targetWt[targetMem.index(symTable[i])] = wt[sourceMem.index(i)]
			except Exception:
				# Narrowed from a bare except so SystemExit/KeyboardInterrupt
				# still propagate; the best-effort skip behaviour is kept.
				print('Error @: '+str(symTable[i]))
		else:
			print('Cant find sym index for '+str(i))
	# Apply mirrored weights
	glTools.utils.deformer.setWeights(targetDeformer,targetWt,mesh)
def copyWeights(sourceMesh,targetMesh,sourceDeformer,targetDeformer):
	'''
	Copy deformer weights from one mesh to another.
	Source and Target mesh objects must have matching point order!
	@param sourceMesh: Mesh to copy weights from
	@type sourceMesh: str
	@param targetMesh: Mesh to copy weights to
	@type targetMesh: str
	@param sourceDeformer: Deformer to query weights from
	@type sourceDeformer: str
	@param targetDeformer: Deformer to apply weights to
	@type targetDeformer: str
	'''
	# Validate meshes
	if not mc.objExists(sourceMesh):
		raise UserInputError('Source mesh "'+sourceMesh+'" does not exist!!')
	if not mc.objExists(targetMesh):
		raise UserInputError('Target mesh "'+targetMesh+'" does not exist!!')
	# Validate deformers; an empty target falls back to the source deformer
	if not mc.objExists(sourceDeformer):
		raise UserInputError('Source deformer "'+sourceDeformer+'" does not exist!!')
	if targetDeformer and not mc.objExists(targetDeformer):
		raise UserInputError('Target deformer "'+targetDeformer+'" does not exist!!')
	targetDeformer = targetDeformer or sourceDeformer
	# Topology guard: a 1:1 weight copy needs identical vertex counts
	if mc.polyEvaluate(sourceMesh,v=True) != mc.polyEvaluate(targetMesh,v=True):
		raise UserInputError('Source and Target mesh vertex counts do not match!!')
	# Transfer the per-vertex weight list verbatim
	weights = glTools.utils.deformer.getWeights(sourceDeformer,sourceMesh)
	glTools.utils.deformer.setWeights(targetDeformer,weights,targetMesh)
|
[
"[email protected]"
] | |
abc855529ce069a7208dd306d3988daf851774db
|
2cfeb115b0ea14c52c3bf99abb53e935fa3d01b7
|
/examples/vanilla/settings_quickstart.py
|
a8a2eec01f21006986c5b8f512f375b6eaf87a00
|
[
"BSD-2-Clause"
] |
permissive
|
aykut/django-oscar
|
796fbc2f62d3dd7877833610f7bead2b006b9739
|
ca3629e74ea1e0affc55d3de4e97f523e352d267
|
refs/heads/master
| 2021-01-22T07:27:59.359441 | 2011-06-30T19:36:01 | 2011-06-30T19:36:01 | 14,263,668 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,527 |
py
|
import os

# Path helpers: PROJECT_DIR is this settings module's directory; location()
# builds absolute paths relative to it (used for MEDIA_ROOT below).
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), x)

# Development-only flags; never leave DEBUG enabled in production.
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True

ADMINS = (
    # ('Your Name', '[email protected]'),
)

# Print outgoing email to the console instead of sending it (dev only).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

MANAGERS = ADMINS

# Single SQLite database stored under /tmp (quickstart convenience only).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/tmp/oscar_vanilla',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("assets")

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# Standard Django context processors plus Oscar's search/promotions ones.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.request",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    # Oscar specific
    'oscar.apps.search.context_processors.search_form',
    'oscar.apps.promotions.context_processors.promotions',
    'oscar.apps.promotions.context_processors.merchandising_blocks',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    # Oscar specific
    'oscar.apps.basket.middleware.BasketMiddleware'
)

INTERNAL_IPS = ('127.0.0.1',)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level':'DEBUG',
            'class':'django.utils.log.NullHandler',
        },
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'file': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': '/tmp/oscar.log',
            'formatter': 'verbose'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers':['null'],
            'propagate': True,
            'level':'INFO',
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'oscar.checkout': {
            'handlers': ['console'],
            'propagate': True,
            'level':'INFO',
        },
        'django.db.backends': {
            'handlers':['null'],
            'propagate': False,
            'level':'DEBUG',
        },
    }
}

# Django contrib apps, third-party dependencies, then the Oscar apps.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.flatpages',
    # External dependencies
    'haystack',
    'sorl.thumbnail',
    # Apps from oscar
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.discount',
    'oscar.apps.order',
    'oscar.apps.checkout',
    'oscar.apps.shipping',
    'oscar.apps.order_management',
    'oscar.apps.product',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.address',
    'oscar.apps.partner',
    'oscar.apps.image',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.reports',
    'oscar.apps.search',
    'oscar.apps.product.reviews',
    'oscar.apps.payment.datacash',
)

LOGIN_REDIRECT_URL = '/accounts/profile/'
APPEND_SLASH = True

# Oscar settings
from oscar.defaults import *

# Haystack settings ('dummy' backend: no real search index in quickstart).
HAYSTACK_SITECONF = 'oscar.search_sites'
HAYSTACK_SEARCH_ENGINE = 'dummy'
|
[
"[email protected]"
] | |
4ca1a63aba6d81d8131ea2d1874236b45ee14bb9
|
283f85409e4aa92444fc865c802d2babd8629f88
|
/app/errors/__init__.py
|
fab5ff4aceb9732c788992026b7de33c99e5c66b
|
[
"MIT"
] |
permissive
|
tomwright01/EyeReport
|
df52a77b3cc6396ba51721421cc5616649286c8b
|
ab227190e7efe9af18125d175efd271ee11dbff4
|
refs/heads/master
| 2021-05-16T04:30:05.374448 | 2019-08-08T15:41:15 | 2019-08-08T15:41:15 | 106,033,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
# -*- coding: utf-8 -*-
"""Errors blueprint package: registers the app's error-handler pages."""
from flask import Blueprint

# Blueprint for the application's error pages; templates live in ./templates.
bp = Blueprint('errors', __name__,
               template_folder='templates')

# Imported at the bottom so handlers can import `bp` without a circular import.
from app.errors import handlers
|
[
"[email protected]"
] | |
81df5fb4cda7e31f1ab5cd5b884be42f24cade5e
|
137ded4225a84d1f5f46099ef6e5545b26cc5fb2
|
/Configuration/GenProduction/python/Pythia8_TuneCP5_5TeV_D0_PiK_prompt_pt1p2_y2p4_cfi.py
|
6281438c1a2a5fb8c8501629827135ab0b1fc8e0
|
[] |
no_license
|
davidlw/2017FlowMCRequest
|
8a27f04d5a70c3f34d003d6ea25888a691e73bb6
|
c9cd086db18ec3a661482cc457a1fdb5949d3b88
|
refs/heads/master
| 2022-08-28T21:42:32.093605 | 2022-08-02T18:00:06 | 2022-08-02T18:00:06 | 148,789,077 | 0 | 2 | null | 2021-01-06T21:45:03 | 2018-09-14T13:01:38 |
Python
|
UTF-8
|
Python
| false | false | 2,674 |
py
|
import FWCore.ParameterSet.Config as cms

from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from GeneratorInterface.EvtGenInterface.EvtGenSetting_cff import *

# Pythia8 generator at sqrt(s) = 5.02 TeV with EvtGen forcing D0 -> K pi decays.
generator = cms.EDFilter("Pythia8GeneratorFilter",
                         pythiaPylistVerbosity = cms.untracked.int32(0),
                         pythiaHepMCVerbosity = cms.untracked.bool(False),
                         comEnergy = cms.double(5020.0),
                         maxEventsToPrint = cms.untracked.int32(0),
                         ExternalDecays = cms.PSet(
        EvtGen130 = cms.untracked.PSet(
            decay_table = cms.string('GeneratorInterface/EvtGenInterface/data/DECAY_2010.DEC'),
            operates_on_particles = cms.vint32(),
            particle_property_file = cms.FileInPath('GeneratorInterface/EvtGenInterface/data/evt.pdl'),
            # Force D0 / anti-D0 to decay via the K pi channel only.
            user_decay_file = cms.vstring('GeneratorInterface/ExternalDecays/data/D0_Kpi.dec'),
            list_forced_decays = cms.vstring('myD0', 'myanti-D0')
            ),
        parameterSets = cms.vstring('EvtGen130')
        ),
                         PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        processParameters = cms.vstring(
            'HardQCD:all = on',
            'PhaseSpace:pTHatMin = 0.', #min pthat
            ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CP5Settings',
            'processParameters',
            )
        )
                         )

generator.PythiaParameters.processParameters.extend(EvtGenExtraParticles)

# Require a charm quark in the event (4 = c, 5 = b).
partonfilter = cms.EDFilter("PythiaFilter",
    ParticleID = cms.untracked.int32(4) # 4 for c and 5 for b quark
    )

# Require a D0 (PDG 421) decaying to pi+ K- with no further descendants.
D0Daufilter = cms.EDFilter("PythiaMomDauFilter",
    ParticleID = cms.untracked.int32(421),
    MomMinPt = cms.untracked.double(0.0),
    MomMinEta = cms.untracked.double(-10.0),
    MomMaxEta = cms.untracked.double(10.0),
    DaughterIDs = cms.untracked.vint32(211, -321),
    NumberDaughters = cms.untracked.int32(2),
    NumberDescendants = cms.untracked.int32(0),
    BetaBoost = cms.untracked.double(0.0),
)

# Kinematic selection on the D0: pT > 1.2 GeV, |y| < 2.5.
D0rapidityfilter = cms.EDFilter("PythiaFilter",
    ParticleID = cms.untracked.int32(421),
    MinPt = cms.untracked.double(1.2),
    MaxPt = cms.untracked.double(1000.),
    MinRapidity = cms.untracked.double(-2.5),
    MaxRapidity = cms.untracked.double(2.5),
)

# Full generation + filter chain run per event.
ProductionFilterSequence = cms.Sequence(generator*partonfilter*D0Daufilter*D0rapidityfilter)
|
[
"[email protected]"
] | |
b916f740e286b9f3ef5c7acddf84b90d8541aa80
|
452f3354c04f887103d0c7c8b4a07dd29a72eed7
|
/A2/app/form.py
|
3d2b4af3bc9bb7c09fc3646a81f113d6fb7cda66
|
[] |
no_license
|
wmuf/ECE1779_Cloud_Computing
|
2d8b4420a26ea6169a5ad8ea13f8dd7997190f71
|
1e385a0a54d4bd8b0c3689ccb4e4064f02efb670
|
refs/heads/master
| 2023-07-24T16:40:45.875193 | 2021-04-20T02:41:54 | 2021-04-20T02:41:54 | 404,382,723 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 950 |
py
|
from flask_wtf import FlaskForm
from wtforms import FloatField, IntegerField, SubmitField, validators
class AutoScalarForm(FlaskForm):
    """Form for configuring auto-scaler thresholds and resize ratios.

    Every field is optional; empty submissions are filtered to None so the
    backend can tell "leave unchanged" apart from an explicit value.
    """
    # CPU utilisation (%) above which the worker pool grows (0-100).
    cpu_threshold_grow = IntegerField('Cpu_Threshold_Grow', [validators.optional(), validators.NumberRange(min=0, max=100, message="Please specify range from 0 to 100")], filters=[lambda x: x or None])
    # CPU utilisation (%) below which the worker pool shrinks (0-100).
    cpu_threshold_shrink = IntegerField('Cpu_Threshold_Shrink', [validators.optional(), validators.NumberRange(min=0, max=100, message="Please specify range from 0 to 100")], filters=[lambda x: x or None])
    # Multiplier applied when expanding the pool (1-8).
    expand_ratio = IntegerField('Expand_Ratio', [validators.optional(), validators.NumberRange(min=1, max=8, message="Please specify range from 1 to 8")], filters=[lambda x: x or None])
    # Fraction of the pool kept when shrinking (0-1).
    shrink_ratio = FloatField('Shrink_Ratio', [validators.optional(), validators.NumberRange(min=0, max=1, message="Please specify range from 0 to 1")], filters=[lambda x: x or None])
    submit = SubmitField('Submit')
|
[
"[email protected]"
] | |
279d8933e8c2057be7901387644e7ccbc5494a53
|
a39ecd4dce4b14f5d17416233fa16c76d2d3f165
|
/RepositoryBootstrap/Impl/Utilities.py
|
305320a07ef4e303ff131cad0be6735155fe6662
|
[
"BSL-1.0",
"Python-2.0",
"OpenSSL",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
davidbrownell/Common_Environment_v3
|
8e6bbed15004a38a4c6e6f337d78eb2339484d64
|
2981ad1566e6d3c00fd390a67dbc1277ef40aaba
|
refs/heads/master
| 2022-09-03T19:04:57.270890 | 2022-06-28T01:33:31 | 2022-06-28T01:33:31 | 132,171,665 | 0 | 0 |
BSL-1.0
| 2021-08-13T21:19:48 | 2018-05-04T17:47:30 |
Python
|
UTF-8
|
Python
| false | false | 4,909 |
py
|
# ----------------------------------------------------------------------
# |
# | Utilities.py
# |
# | David Brownell <[email protected]>
# | 2018-05-02 15:57:42
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Utilities used by multiple files within this module."""
import hashlib
import importlib
import os
import re
import sys
from contextlib import contextmanager
import six
from RepositoryBootstrap import Constants
from RepositoryBootstrap.Impl import CommonEnvironmentImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironmentImports.CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GenerateCommands( functor,                      # def Func() -> []
                      is_debug,
                    ):
    """
    Generates shell-specific commands as returned by the provided functor.

    The functor may return an int (result code, no commands), a
    (result, commands) tuple, or a bare command list (implying result 0).

    Returns:
        (result, generated_commands)
    """

    assert functor

    commands = []

    try:
        result = functor()

        # Normalize the three supported return shapes to (result, commands).
        if isinstance(result, int):
            commands = []
        elif isinstance(result, tuple):
            result, commands = result
        else:
            commands = result
            result = 0

    except Exception as ex:
        # In debug mode surface the full traceback; otherwise just the message.
        if is_debug:
            import traceback

            error = traceback.format_exc()
        else:
            error = str(ex)

        # Replace any partial output with an error message + non-zero exit.
        commands = [ CommonEnvironmentImports.CurrentShell.Commands.Message("\n\nERROR: {}".format(CommonEnvironmentImports.StringHelpers.LeftJustify(error, len("ERROR: ")))),
                     CommonEnvironmentImports.CurrentShell.Commands.Exit(return_code=-1),
                   ]
        result = -1

    # In debug mode, prefix the output with a dump of the generated script.
    if is_debug and commands:
        commands = [ CommonEnvironmentImports.CurrentShell.Commands.Message("{}\n".format(CommonEnvironmentImports.StringHelpers.Prepend( "Debug: ",
                                                                                                                                          CommonEnvironmentImports.CurrentShell.GenerateCommands(commands),
                                                                                                                                          skip_first_line=False,
                                                                                                                                        ))),
                   ] + commands

    return result, commands
# ----------------------------------------------------------------------
def CalculateFingerprint(repo_dirs, relative_root=None):
    """
    Returns a value that can be used to determine if any configuration info
    has changed for a repo and its dependencies.

    Maps each repo dir (made relative to `relative_root` when given) to the
    MD5 hex digest of its setup customization file, skipping the leading
    comment header. Repos without a customization file are omitted.
    """

    fingerprints = {}

    for repo_dir in repo_dirs:
        filename = os.path.join(repo_dir, Constants.SETUP_ENVIRONMENT_CUSTOMIZATION_FILENAME)
        if not os.path.isfile(filename):
            continue

        hasher = hashlib.md5()

        with open(filename, 'rb') as f:
            # Skip the file header, as it has no impact on the file's actual contents.
            in_header = True

            for line in f:
                if in_header and line.lstrip().startswith(b'#'):
                    continue

                in_header = False
                hasher.update(line)

        key = repo_dir
        if relative_root:
            key = CommonEnvironmentImports.FileSystem.GetRelativePath(relative_root, repo_dir)

        fingerprints[key] = hasher.hexdigest()

    return fingerprints
# ----------------------------------------------------------------------
@contextmanager
def CustomMethodManager(customization_filename, method_name):
    """Attempts to load a customization filename and extract the given method.

    Yields the method (or None when the file or the method doesn't exist);
    the module is removed from sys.modules and sys.path is restored on exit.
    """

    # No customization file: still yield so callers can test for None.
    if not os.path.isfile(customization_filename):
        yield None
        return

    customization_path, customization_name = os.path.split(customization_filename)
    customization_name = os.path.splitext(customization_name)[0]

    # Temporarily prepend the file's directory so importlib can resolve it;
    # CallOnExit undoes the sys.path / sys.modules mutations afterwards.
    sys.path.insert(0, customization_path)
    with CommonEnvironmentImports.CallOnExit(lambda: sys.path.pop(0)):
        mod = importlib.import_module(customization_name)
        with CommonEnvironmentImports.CallOnExit(lambda: sys.modules.pop(customization_name)):
            # getattr defaults to None when the module lacks the method.
            yield getattr(mod, method_name, None)
|
[
"[email protected]"
] | |
6512eaa1731b6c91c774540047b19a5886180e3b
|
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
|
/GIT-USERS/TOM2/CS32_Architecture_GP/day4/simple.py
|
b368bec2f1d9590d966617b2ce072a8e347ffd3e
|
[] |
no_license
|
Portfolio-Projects42/UsefulResourceRepo2.0
|
1dccc8961a09347f124d3ed7c27c6d73b9806189
|
75b1e23c757845b5f1894ebe53551a1cf759c6a3
|
refs/heads/master
| 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,238 |
py
|
# Let's build a data driven machine!

import sys

# What do we need to have our machine working?
"""
- Some sort of memory
- Some way of stopping operation
- Some way of keeping the CPU running
- Some sort of storage for local variables seperate from main RAM (memory) eg; Registers
- Some sort of operations that can be performed such as (printing something, saving data to a variable[register] )
- Some FETCH, DECODE, EXECUTE CYCLE
"""

# Operations that we can perform (opcode values; the high two bits of a
# binary-encoded opcode give its operand count - see the main loop).
HALT = 0b00000001
PRINT_VLAD = 2
PRINT_NUM = 3
SAVE = 0b10000010
PRINT_REG = 5
ADD = 6
# PUSH and POP
PUSH = 7
POP = 8
# TODO: CALL and RET
CALL = 0b01001001
RET = 0b00001010
SUB = 23
PRN = 0b01000111
SHL = 0b10101100
SHR = 0b10101101

# some sort of memory (lets refactor this to load in opcodes from a file)
def load_memory(filename):
    """Load a program of binary opcodes from *filename* into RAM.

    Each line may carry a trailing '#' comment; blank/comment-only lines are
    skipped. Remaining tokens are parsed as base-2 integers and stored
    sequentially from address 0 in the module-level `memory` list, echoing
    each value as it is loaded. Exits with status 2 when the file is missing.
    """
    try:
        address = 0

        with open(filename) as f:
            for line in f:
                # Strip inline comments and surrounding whitespace.
                comment_split = line.split("#")
                n = comment_split[0].strip()
                if n == '':
                    continue

                val = int(n, 2)  # opcodes are written in binary
                # store val in memory
                memory[address] = val
                address += 1

                print(f"{val:08b}: {val:d}")

    except FileNotFoundError:
        # Bug fix: report the actual missing filename (the message previously
        # printed a hard-coded "(unknown)" placeholder).
        print(f"{sys.argv[0]}: {filename} not found")
        sys.exit(2)
# RAM: 256 byte-sized cells.
memory = [0] * 256

# keep track of running?
running = True

# some sort of counter (program counter: address of the next instruction)
pc = 0

# Some local var holders [registers]
registers = [0] * 10

# Stack Pointer (R7) as per specs
# index of the registers list
# SP
SP = 7

# to use to store where the top of the stack is
# 0xF4 (244)
registers[SP] = 244

# size of opcode (re-computed per instruction from the opcode's high bits)
op_size = 1

# grab any args
if len(sys.argv) != 2:
    print("usage: simple.py filename")
    sys.exit(1)

# load opcodes in to memory
load_memory(sys.argv[1])

# REPL to run once per cycle of CPU
# inside this we will have our FETCH, DECODE, EXECUTE CYCLE
while running:
    # FETCH
    cmd = memory[pc]
    # Top two bits of the opcode encode its operand count; +1 for the opcode
    # itself gives how far to advance pc after execution.
    op_size = ((cmd >> 6) & 0b11) + 1

    # DECODE
    if cmd == PRINT_VLAD:
        # EXECUTE
        print("Vlad")
    elif cmd == HALT:
        running = False
    elif cmd == PRINT_NUM:
        # Operand is an immediate value to print.
        num = memory[pc + 1]
        print(num)
    elif cmd == PRINT_REG:
        index_of_reg = memory[pc + 1]
        num_at_reg = registers[index_of_reg]
        print(num_at_reg)
    elif cmd == SAVE:
        # SAVE immediate -> register.
        num_to_save = memory[pc + 1] # 300
        reg_index = memory[pc + 2]
        registers[reg_index] = num_to_save
    elif cmd == ADD:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] += registers[reg_index_b]
    elif cmd == SUB:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] -= registers[reg_index_b]
    elif cmd == SHL:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] <<= registers[reg_index_b]
    elif cmd == SHR:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] >>= registers[reg_index_b]
    # PUSH
    elif cmd == PUSH:
        # setup
        reg_index = memory[pc + 1]
        val = registers[reg_index]
        # decrememt Stack Pointer (stack grows downward in memory)
        registers[SP] -= 1
        # insert val on to the stack
        memory[registers[SP]] = val
    # POP
    elif cmd == POP:
        # setup
        reg_index = memory[pc + 1]
        val = memory[registers[SP]]
        # take value from stack and put it in reg
        registers[reg_index] = val
        # increment Stack Pointer
        registers[SP] += 1
    # CALL
    elif cmd == CALL:
        # push the return address on to the stack
        registers[SP] -= 1
        memory[registers[SP]] = pc + 2
        # Set the PC to the subroutines address
        reg = memory[pc + 1]
        pc = registers[reg]
        # pc was set explicitly; don't advance it again below.
        op_size = 0
    # RET
    elif cmd == RET:
        # POP return address from stack to store in pc
        pc = memory[registers[SP]]
        registers[SP] += 1
        op_size = 0
    else:
        print(f"Invalid Instruction: {cmd}")
        running = False

    # Advance past the opcode and its operands (0 when pc was set directly).
    pc += op_size
|
[
"[email protected]"
] | |
0dd564c9ec118b6ab6323eccabc8304d63041320
|
0f481498bba97a7bb9f38bc2b9a1dc5b9ebf50a5
|
/Pacote-download/Exercicios/ex045.py
|
d818f1b6795a40fce1325086d8ba0bb24fd50a3f
|
[
"MIT"
] |
permissive
|
agnaka/CEV-Python-Exercicios
|
d7e8efd6426d60d6920ba3cfddbd049a80e7d6da
|
a4299abd5da283b1b15ed2436965db162f42885f
|
refs/heads/master
| 2022-10-23T11:45:56.298286 | 2020-06-10T21:13:15 | 2020-06-10T21:13:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,486 |
py
|
# Rock-paper-scissors ("Jankenpô") against the computer.
from random import randint

print('-=' * 20)
print('\033[1;34mVAMOS A JOGAR JANKENPÔ!!!\033[m')
print('-=' * 20)
print('''SUAS OPÇÕES:
[1] Pedra
[2] Papel
[3] Tesoura''')
choice = int(input('Qual a sua escolha? '))
print('JAN')
print('KEN')
print('PO!!!')
# 1 = rock (Pedra), 2 = paper (Papel), 3 = scissors (Tesoura).
itens = ('Pedra', 'Papel', 'Tesoura')
compu = randint(1, 3)
# print(compu)
# Compare every (computer, player) combination and announce the outcome.
if compu == 1 and choice == 1:
    print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('PEDRA', 'PEDRA'))
elif compu == 1 and choice == 2:
    print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('PEDRA', 'PAPEL'))
elif compu == 1 and choice == 3:
    print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('PEDRA', 'TESOURA'))
elif compu == 2 and choice == 2:
    print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('PAPEL', 'PAPEL'))
elif compu == 2 and choice == 1:
    print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('PAPEL', 'PEDRA'))
elif compu == 2 and choice == 3:
    print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('PAPEL', 'TESOURA'))
elif compu == 3 and choice == 3:
    print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('TESOURA', 'TESOURA'))
elif compu == 3 and choice == 1:
    print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('TESOURA', 'PEDRA'))
elif compu == 3 and choice == 2:
    print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('TESOURA', 'PAPEL'))
# NOTE(review): a choice outside 1-3 matches no branch and prints nothing -
# consider validating the input.
|
[
"[email protected]"
] | |
cf50250d8ef3adadc370a28b4e97588d22adf4a9
|
8898273f9811fab29eb5621734bafcdf204d8229
|
/scipy-stubs/special/_precompute/expn_asy.pyi
|
61ecaf6d73b6d529e4f36b9d6019a65c5721a799
|
[] |
no_license
|
tyrion/scipy-stubs
|
628ad6321a7e1502683a2b55a759777508ab4b67
|
bf49a91313523c4f635bc3e5d14444c1361caf64
|
refs/heads/master
| 2020-05-30T21:59:43.001510 | 2019-06-03T10:30:54 | 2019-06-03T10:30:54 | 189,984,340 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
pyi
|
# Stubs for scipy.special._precompute.expn_asy (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.

from typing import Any

# Module-level symbol (dynamically typed by stubgen).
x: Any

def generate_A(K: Any): ...

# Warning message emitted by the module.
WARNING: str

def main() -> None: ...
|
[
"[email protected]"
] | |
7557b31d5f98ea2c2c7f9df591d067658163f0a1
|
3035e6a2b4e5b5662670c188785ed9fad0e1a315
|
/Chapter07/example/python/permissions/can_get_all_acc_txs.py
|
18c41042f78d6a276b339039ec7df00cbc8a5bdd
|
[
"MIT"
] |
permissive
|
mahen92/Hyperledger-Cookbook
|
52491da47ea7e4b3d988b1303ad4641d89bd3c0e
|
c2aaf9f9fd58757110a2a6b3ab7498da11fba254
|
refs/heads/master
| 2021-01-09T15:36:10.368893 | 2020-04-10T18:17:41 | 2020-04-10T18:17:41 | 242,358,174 | 0 | 0 |
MIT
| 2020-02-22T14:46:54 | 2020-02-22T14:46:53 | null |
UTF-8
|
Python
| false | false | 1,308 |
py
|
#
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import iroha
import commons
# Test fixtures: an admin in domain 'first' and a restricted user in 'second'.
admin = commons.new_user('admin@first')
alice = commons.new_user('alice@second')
@commons.hex
def genesis_tx():
    """Build and sign the genesis transaction (hex-encoded by @commons.hex).

    Creates the network peer, two domains/roles, and the two test accounts.
    The test role carries only kGetAllAccTxs so that permission is isolated.
    """
    test_permissions = iroha.RolePermissionSet([iroha.Role_kGetAllAccTxs])
    tx = iroha.ModelTransactionBuilder() \
        .createdTime(commons.now()) \
        .creatorAccountId(admin['id']) \
        .addPeer('0.0.0.0:50541', admin['key'].publicKey()) \
        .createRole('admin_role', commons.all_permissions()) \
        .createRole('test_role', test_permissions) \
        .createDomain('first', 'admin_role') \
        .createDomain('second', 'test_role') \
        .createAccount('admin', 'first', admin['key'].publicKey()) \
        .createAccount('alice', 'second', alice['key'].publicKey()) \
        .build()
    # Sign with the admin key and serialize the protobuf transaction.
    return iroha.ModelProtoTransaction(tx) \
        .signAndAddSignature(admin['key']).finish()
@commons.hex
def account_transactions_query():
    """Query (signed by alice) for the admin account's transactions.

    Exercises the kGetAllAccTxs permission granted to alice's role.
    """
    tx = iroha.ModelQueryBuilder() \
        .createdTime(commons.now()) \
        .queryCounter(1) \
        .creatorAccountId(alice['id']) \
        .getAccountTransactions(admin['id']) \
        .build()
    # Sign with alice's key and serialize the protobuf query.
    return iroha.ModelProtoQuery(tx) \
        .signAndAddSignature(alice['key']).finish()
|
[
"[email protected]"
] | |
ef464d2028beaa30b26f3bd7318554f2e18e9109
|
7142c3941481e661075154d714a29d5e283a3074
|
/Decorator1.py
|
d4a71bacf012ffb8e07545dfa66863b19ccd5332
|
[] |
no_license
|
nirajan5/Demo
|
5642a9669fedcca47b0304ac423c0b3e6333b8e2
|
2451875bf5698cd38af69baa117c14099951bc9f
|
refs/heads/master
| 2023-07-27T17:04:03.689673 | 2021-09-15T11:14:25 | 2021-09-15T11:14:25 | 406,732,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 237 |
py
|
def make_pretty(func):
    """Decorator that prints "I got decorated" before delegating to *func*.

    Improvements over the original: the wrapper forwards arguments and
    propagates *func*'s return value (it was discarded before), and
    functools.wraps preserves *func*'s name/docstring on the wrapper.
    """
    from functools import wraps  # local import: keeps this edit self-contained

    @wraps(func)
    def inner(*args, **kwargs):
        print("I got decorated")
        return func(*args, **kwargs)
    return inner
def simple():
    """Print a fixed message (demo target for the decorator)."""
    print("I am simple")

# Call the undecorated function first for comparison.
simple()

# let's decorate this ordinary function
pretty = make_pretty(simple)
pretty()
|
[
"[email protected]"
] | |
1f26110f249939ecb0f0260b32cca636fdea2aee
|
0c534f461e9c1e8b9ef442c1bac1d7a1dea851b1
|
/new_plotter.py
|
0dab04a15268c6cbe758b8500943bf32a14cc5ad
|
[] |
no_license
|
paulgowdy/nle
|
bb77e07a02e319775266091e34ad6f669d1034cd
|
27c62f443b7ff6fcd3822596b86152ef2f320804
|
refs/heads/main
| 2023-08-03T16:44:00.607002 | 2021-09-03T04:33:12 | 2021-09-03T04:33:12 | 390,802,676 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,721 |
py
|
# Plot rolling mean episode score and episode length for training runs
# identified by their hydra-style output timestamps.
import matplotlib.pyplot as plt
import pandas as pd

runs = [
    #'2021-08-05/09-53-24',
    '2021-08-08/17-38-17',
    '2021-08-09/17-54-30',
]

colors = ['navy','darkred','green','navy','navy','red','aqua','cyan','red','red','darkred']

# Log files live under the torchbeast outputs dir (WSL path), one per run.
prefix = "//wsl$/Ubuntu-20.04/home/paulgamble/neurips-2021-the-nethack-challenge/nethack_baselines/torchbeast/outputs/"
#prefix = "//wsl$/Ubuntu-20.04/home/paulgamble/hackbot_transformer/nethack_baselines/torchbeast/outputs/"
suffix = "/logs.csv"

# Window (in logged rows) for the rolling average.
roll_window = 100

# Figure 1: rolling mean episode return per run.
plt.figure()
ax = plt.gca()

for r, c in zip(runs, colors):
    log_fn = prefix + r + suffix
    df = pd.read_csv(log_fn)
    df['rolling_score'] = df['mean_episode_return'].rolling(roll_window).mean()
    #df['score_std_low'] = df['rolling_score'] - df['mean_episode_return'].rolling(roll_window).std()
    #df['score_std_high'] = df['rolling_score'] + df['mean_episode_return'].rolling(roll_window).std()
    #ax.fill_between(df['step'], df['score_std_low'], df['score_std_high'], color=c, alpha=0.3)
    df.plot(x='step',y='rolling_score',ax=ax, color=c)

# Legend shows just the time portion of each run id.
labels = [x.split('/')[-1] for x in runs]
plt.legend(labels)
plt.title("Mean Episode Score")
#plt.ylim(-200,0)

# Figure 2: rolling mean episode length per run.
plt.figure()
ax = plt.gca()

for r, c in zip(runs, colors):
    log_fn = prefix + r + suffix
    df = pd.read_csv(log_fn)
    df['rolling_score'] = df['mean_episode_step'].rolling(roll_window).mean()
    #df['rolling_score'] = df['mean_episode_return'].rolling(roll_window).mean()
    #df['rolling_score'].plot(x='step')
    #df['mean_episode_return'].plot()
    df.plot(x='step',y='rolling_score',ax=ax, color=c)

plt.legend(runs)
#plt.ylim(-200,0)
plt.title("Mean Episode Steps")

plt.show()
|
[
"[email protected]"
] | |
64ff0b3da04db2adfecb58e8771034b3ad7b2520
|
859093a06bb7b8ff2c00f21d4d3052b9d6b3a580
|
/schedule/widgets.py
|
b7aa89545511619cbebd2f923c9a003ca96d629d
|
[
"MIT"
] |
permissive
|
fitahol/fitahol
|
bbf71b695fbacad2d3a1f99a034c041ea6069529
|
ce84dc909aa98f2dc7594ef26568e015cbfe0e94
|
refs/heads/master
| 2021-01-19T20:18:11.677674 | 2017-02-20T14:05:39 | 2017-02-20T14:05:39 | 82,561,065 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,540 |
py
|
from __future__ import unicode_literals
from django.forms.widgets import TextInput
from django.utils.safestring import mark_safe
class SpectrumColorPicker(TextInput):
    """
    Color-picker text input based on Brian Grinstead's Spectrum -
    http://bgrins.github.com/spectrum/

    Renders a plain text input followed by an inline script that attaches
    the Spectrum picker to the input's DOM id; the jQuery/Spectrum assets
    are declared via the widget Media class.
    """
    class Media:
        css = {'all': ("//cdnjs.cloudflare.com/ajax/libs/spectrum/1.7.1/spectrum.css",)}
        js = ('//cdnjs.cloudflare.com/ajax/libs/jquery/1.8.3/jquery.min.js',
              '//cdnjs.cloudflare.com/ajax/libs/spectrum/1.7.1/spectrum.js',)

    def _render_js(self, _id, value):
        # Build the inline <script> that initializes Spectrum on element #_id.
        # NOTE(review): `value` is currently unused - the picker always starts
        # empty; confirm whether the initial color should be seeded from it.
        js = u"""
            <script type="text/javascript">
                $(document).ready(function(){
                    $('#%s').spectrum({
                        color: "",
                        allowEmpty: true,
                        showAlpha: true,
                        showInput: true,
                        className: "full-spectrum",
                        showInitial: true,
                        showPalette: true,
                        showSelectionPalette: true,
                        maxSelectionSize: 10,
                        preferredFormat: "hex",
                        localStorageKey: "spectrum.demo",
                        palette: [
                            ["rgb(0, 0, 0)", "rgb(67, 67, 67)", "rgb(102, 102, 102)",
                            "rgb(204, 204, 204)", "rgb(217, 217, 217)","rgb(255, 255, 255)"],
                            ["rgb(152, 0, 0)", "rgb(255, 0, 0)", "rgb(255, 153, 0)", "rgb(255, 255, 0)", "rgb(0, 255, 0)",
                            "rgb(0, 255, 255)", "rgb(74, 134, 232)", "rgb(0, 0, 255)", "rgb(153, 0, 255)", "rgb(255, 0, 255)"],
                            ["rgb(230, 184, 175)", "rgb(244, 204, 204)", "rgb(252, 229, 205)", "rgb(255, 242, 204)", "rgb(217, 234, 211)",
                            "rgb(208, 224, 227)", "rgb(201, 218, 248)", "rgb(207, 226, 243)", "rgb(217, 210, 233)", "rgb(234, 209, 220)",
                            "rgb(221, 126, 107)", "rgb(234, 153, 153)", "rgb(249, 203, 156)", "rgb(255, 229, 153)", "rgb(182, 215, 168)",
                            "rgb(162, 196, 201)", "rgb(164, 194, 244)", "rgb(159, 197, 232)", "rgb(180, 167, 214)", "rgb(213, 166, 189)",
                            "rgb(204, 65, 37)", "rgb(224, 102, 102)", "rgb(246, 178, 107)", "rgb(255, 217, 102)", "rgb(147, 196, 125)",
                            "rgb(118, 165, 175)", "rgb(109, 158, 235)", "rgb(111, 168, 220)", "rgb(142, 124, 195)", "rgb(194, 123, 160)",
                            "rgb(166, 28, 0)", "rgb(204, 0, 0)", "rgb(230, 145, 56)", "rgb(241, 194, 50)", "rgb(106, 168, 79)",
                            "rgb(69, 129, 142)", "rgb(60, 120, 216)", "rgb(61, 133, 198)", "rgb(103, 78, 167)", "rgb(166, 77, 121)",
                            "rgb(91, 15, 0)", "rgb(102, 0, 0)", "rgb(120, 63, 4)", "rgb(127, 96, 0)", "rgb(39, 78, 19)",
                            "rgb(12, 52, 61)", "rgb(28, 69, 135)", "rgb(7, 55, 99)", "rgb(32, 18, 77)", "rgb(76, 17, 48)"]
                        ]
                    });
                });
            </script>""" % (_id)
        return js

    def render(self, name, value, attrs=None):
        # Bug fix: `attrs` defaults to None, but the original immediately did
        # `'id' not in attrs`, raising TypeError when the widget was rendered
        # without attrs. Normalize to an empty dict first.
        if attrs is None:
            attrs = {}
        if 'id' not in attrs:
            attrs['id'] = "id_%s" % name
        rendered = super(SpectrumColorPicker, self).render(name, value, attrs)
        # Append the initialization script after the underlying text input.
        return mark_safe(rendered + self._render_js(attrs['id'], value))
|
[
"[email protected]"
] | |
2ddcf7148c7696de359ace2ede7a271758df3cfc
|
2118f244be2e09508e3c89dee432d4a75343b430
|
/Twitter Projects/twitter_sentiment_basic_with_function_RJ_Keys.py
|
981ff629e356b9bde7b1e8186617e488aaf965f0
|
[] |
no_license
|
RamiJaloudi/Python-Scripts
|
91d139093a95f9498a77b1df8ec2f790c4f4dd4c
|
37e740a618ae543a02c38dc04a32ef95202ff613
|
refs/heads/master
| 2020-04-29T14:55:41.108332 | 2019-03-18T05:42:06 | 2019-03-18T05:42:06 | 176,212,014 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,027 |
py
|
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
#consumer key, consumer secret, access token, access secret.
# SECURITY NOTE(review): API credentials are hard-coded below; they should be
# loaded from environment variables or a config file, and these keys revoked.
ckey="r2I3FdcFB3WRKpKoxhpb9pkra"
csecret="Snt0LzxPyKIUQphTQmbsf0DKPALKPfCAy4Jjr3g9O3A93AGdHM"
atoken="18894514-JsJsbjRkWF4jgA7nrMyNYfLR3RccNSUlTzrYO5shJ"
asecret="BhFpvR3ZJe46wmA3sEUJ1eStz8y83WtgIlw91jJBU01z6"

##def sentimentAnalysis(text):
##    encoded_text = urllib.quote(text)
class listener(StreamListener):
    """Minimal tweepy stream listener that dumps raw tweet JSON to stdout."""

    def on_data(self, data):
        # `data` is the raw JSON string for one stream event; just print it.
        # NOTE(review): returning nothing (None) here - tweepy conventionally
        # expects True to keep the stream alive; confirm before re-enabling
        # the commented return below.
        print(data)
        #return(True)

##        tweet = data.split(',"text:"')[1].split('","source')[0]
##
##        saveMe = tweet+'::'+sentimentRating+'\n'
##        output = open('output.txt','a')
##        outpute.write(saveMe)
##        output.close()
##        return True

    def on_error(self, status):
        # Print the error/status code reported by the streaming API.
        print (status)
# Authenticate with the Twitter API using the credentials above.
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

# Open a live stream filtered to tweets containing "#target".
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["#target"])
|
[
"[email protected]"
] | |
65398257cd8f44323e9a0e99c7ed1824e8f632ba
|
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_09_01/aio/_configuration.py
|
38ab1c393bb97968a488f5f477a42303c3b73493
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
gaoyp830/azure-sdk-for-python
|
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
|
1c66defa502b754abcc9e5afa444ca03c609342f
|
refs/heads/master
| 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 |
MIT
| 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null |
UTF-8
|
Python
| false | false | 3,523 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for PolicyClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2019-09-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(PolicyClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2019-09-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
|
[
"[email protected]"
] | |
248c9152bbb8623c6fc0909ddc639ffa604c646b
|
99e4d9226e124215aaf66945cfaa5c42d18cc19f
|
/questionbot/matchableSentence.py
|
08fc5ea5a19bf209ddf7989190890511301aaabe
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mathieucaroff/oxowlbot
|
d826423a1a4cca8a38c90383d0a71dbb40052f35
|
a10c12b7c94b3e7030cef2f57c567bbd3034c8c9
|
refs/heads/master
| 2022-04-18T14:06:29.049957 | 2020-04-22T14:44:57 | 2020-04-22T14:44:57 | 255,177,595 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,442 |
py
|
import logging
from typing import List
from .stanza.pword import PWord
symbolMap = {
-6: "<=-",
-5: "<-=",
-4: "<--",
-3: "<=",
-2: "<-",
-1: "<",
0: "==",
1: ">",
2: "->",
3: "=>",
4: "-->",
5: "=->",
6: "-=>",
}
def matchableSentence(wordList: List[PWord]) -> str:
matchableWordList = []
for word in wordList:
matchableWordList.append(matchableWord(word))
return " ".join(matchableWordList)
def matchableWord(word: PWord) -> str:
diff = word.head - int(word.id)
if word.head == 0:
diff = 0
symbol = symbolMap.get(diff)
number = "x"
hintString = ""
pastHint = 0
for piece in word.feats.split("|"):
if piece == "Number=Plur":
number = "2"
if piece == "Number=Sing":
number = "1"
if piece == "VerbForm=Part":
pastHint += 1
if piece == "Tense=Past":
pastHint += 1
if pastHint >= 2:
hintString += "_Hpast"
w = word
upos = w.upos.lower()
feats = w.feats.replace("|", "_F").replace(":", "+")
deprel = w.deprel.replace(':', '+')
result = f":{w.id}_L{w.lemma}_U{upos}_N{number}_R{deprel}{hintString}_F{feats}_{symbol}."
if "." in result[1:-1] or ":" in result[1:-1]:
logging.error(f"bad (:.) matchableWord: {result}")
result = ":" + result.replace(":", "").replace(".", "") + "."
return result
|
[
"[email protected]"
] | |
9e367421bb74b17511012b38e47f3fc511540a62
|
f98347c036a98c32a0c72c49bf1e298588d48bab
|
/MyProjectRest/MyProjectRest/settings.py
|
bdbb19e0337188845c243b6cae3526de63938721
|
[] |
no_license
|
ikki2530/django_isfun
|
2de26ceb1e3e2a76063dcd602f8c3afa627713cb
|
91615c96b2297005ca3a21edc123466ca7d4ae18
|
refs/heads/master
| 2022-12-26T21:14:29.824341 | 2020-10-16T16:21:43 | 2020-10-16T16:21:43 | 288,185,540 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,181 |
py
|
"""
Django settings for MyProjectRest project.
Generated by 'django-admin startproject' using Django 2.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nt!@6n=-m_klbe=fg7)g0j2hqefw-pcj9t8vb(yl!g8^h*_(d^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'api_basic',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyProjectRest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MyProjectRest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
10f1a2beeb2f92dd6f7c12c073707ab12b23578b
|
176497ba1cea7233f249a5f439a65f7c472b267f
|
/11_polls_part_4/02_refactor_using_django_form/feed/forms.py
|
b6e2d79cd5ed81d818be9a91bfe921cbf89f9fc8
|
[] |
no_license
|
CodingNomads/django-web-dev
|
79a3a94707489ca0d5f0bf49193b7ffdf6270f4a
|
e03b8ed130f100afb0296c0d76a84206fbbf789d
|
refs/heads/master
| 2023-05-02T05:12:21.427462 | 2022-11-06T17:56:14 | 2022-11-06T17:56:14 | 235,174,521 | 1 | 7 | null | 2023-04-21T20:54:10 | 2020-01-20T18:53:31 |
Python
|
UTF-8
|
Python
| false | false | 489 |
py
|
from django import forms
from django.utils.html import strip_tags
from .models import Post
class PostForm(forms.ModelForm):
body = forms.CharField(required=True,
widget=forms.widgets.Textarea(
attrs={
'placeholder': 'Post',
'class': 'form-control'
}))
class Meta:
model = Post
exclude = ('user', )
|
[
"[email protected]"
] | |
ed2b24be9e79cc47a29adef832946f1b9008a54f
|
3a298c93b67386392d3dee243671f2c101decf01
|
/hackerrank/interview-preparation-kit/string-manipulation/02_alternating_characters.py
|
4ed83c47964e820fee050e52be5a67ab600cced2
|
[] |
no_license
|
Zahidsqldba07/coding-problems-2
|
ffbc8408e4408fc846c828af2ec50a9d72e799bc
|
020bffbd14ca9993f1e678181ee7df761f1533de
|
refs/heads/master
| 2023-06-26T11:05:34.089697 | 2021-07-21T15:16:10 | 2021-07-21T15:16:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
def alternatingCharacters(s):
min_dels = 0
for i in range(1, len(s)):
if s[i] == s[i-1]:
i += 1
min_dels += 1
return min_dels
|
[
"[email protected]"
] | |
06987b844ae674541272c3184dcb10864d851190
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/t001125.py
|
fbab733d238bfb3ac8c2b42ba0affa9097b2b6e9
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
def iToR(n):
c = [["","I","II","III","IV","V","VI","VII","VIII","IX"],
["","X","XX","XXX","XL","L","LX","LXX","LXXX","XC"],
["","C","CC","CCC","CD","D","DC","DCC","DCCC","CM"],
["","M","MM","MMM"]]
s = ""
s += c[3][n // 1000 % 10]
s += c[2][n // 100 % 10]
s += c[1][n // 10 % 10]
s += c[0][n % 10]
return s
try:
while 1:
n = int(input())
print(iToR(n))
except:
pass
|
[
"[email protected]"
] | |
16e2f39d93d44121207666aaed39b10a375cc842
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/bBExn57vLEsXgHC5m_18.py
|
3f968eb28deda866f7bf09e4165adcac6ad9b42e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 651 |
py
|
"""
Create a function that returns `True` if three points belong to the same line,
and `False` otherwise. Each point is represented by a list consisting of an x-
and y-coordinate.
### Examples
same_line([[0, 0], [1, 1], [3, 3]]) ➞ True
same_line([[-2, -1], [2, 1], [0, 0]]) ➞ True
same_line([[-2, 0], [-10, 0], [-8, 0]]) ➞ True
same_line([[0, 0], [1, 1], [1, 2]]) ➞ False
same_line([[3, 4], [3, 5], [6, 6]]) ➞ False
### Notes
Note the special case of a vertical line.
"""
def same_line(lst):
return (lst[1][0]-lst[0][0])*(lst[2][1]-lst[0][1])==(lst[2][0]-lst[0][0])*(lst[1][1]-lst[0][1])
|
[
"[email protected]"
] | |
77a7abbd67fc0f5d958444057b77e1fa3518e3fa
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/monitoring/dashboard/v1/monitoring-dashboard-v1-py/google/monitoring/dashboard_v1/types/xychart.py
|
d94061b1defd3543e734bee18c04bf35a39253da
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,725 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.monitoring.dashboard_v1.types import metrics
from google.protobuf import duration_pb2 as duration # type: ignore
__protobuf__ = proto.module(
package='google.monitoring.dashboard.v1',
manifest={
'XyChart',
'ChartOptions',
},
)
class XyChart(proto.Message):
r"""A chart that displays data on a 2D (X and Y axes) plane.
Attributes:
data_sets (Sequence[google.monitoring.dashboard_v1.types.XyChart.DataSet]):
Required. The data displayed in this chart.
timeshift_duration (google.protobuf.duration_pb2.Duration):
The duration used to display a comparison
chart. A comparison chart simultaneously shows
values from two similar-length time periods
(e.g., week-over-week metrics).
The duration must be positive, and it can only
be applied to charts with data sets of LINE plot
type.
thresholds (Sequence[google.monitoring.dashboard_v1.types.Threshold]):
Threshold lines drawn horizontally across the
chart.
x_axis (google.monitoring.dashboard_v1.types.XyChart.Axis):
The properties applied to the X axis.
y_axis (google.monitoring.dashboard_v1.types.XyChart.Axis):
The properties applied to the Y axis.
chart_options (google.monitoring.dashboard_v1.types.ChartOptions):
Display options for the chart.
"""
class DataSet(proto.Message):
r"""Groups a time series query definition with charting options.
Attributes:
time_series_query (google.monitoring.dashboard_v1.types.TimeSeriesQuery):
Required. Fields for querying time series
data from the Stackdriver metrics API.
plot_type (google.monitoring.dashboard_v1.types.XyChart.DataSet.PlotType):
How this data should be plotted on the chart.
legend_template (str):
A template string for naming ``TimeSeries`` in the resulting
data set. This should be a string with interpolations of the
form ``${label_name}``, which will resolve to the label's
value.
min_alignment_period (google.protobuf.duration_pb2.Duration):
Optional. The lower bound on data point frequency for this
data set, implemented by specifying the minimum alignment
period to use in a time series query For example, if the
data is published once every 10 minutes, the
``min_alignment_period`` should be at least 10 minutes. It
would not make sense to fetch and align data at one minute
intervals.
"""
class PlotType(proto.Enum):
r"""The types of plotting strategies for data sets."""
PLOT_TYPE_UNSPECIFIED = 0
LINE = 1
STACKED_AREA = 2
STACKED_BAR = 3
HEATMAP = 4
time_series_query = proto.Field(proto.MESSAGE, number=1,
message=metrics.TimeSeriesQuery,
)
plot_type = proto.Field(proto.ENUM, number=2,
enum='XyChart.DataSet.PlotType',
)
legend_template = proto.Field(proto.STRING, number=3)
min_alignment_period = proto.Field(proto.MESSAGE, number=4,
message=duration.Duration,
)
class Axis(proto.Message):
r"""A chart axis.
Attributes:
label (str):
The label of the axis.
scale (google.monitoring.dashboard_v1.types.XyChart.Axis.Scale):
The axis scale. By default, a linear scale is
used.
"""
class Scale(proto.Enum):
r"""Types of scales used in axes."""
SCALE_UNSPECIFIED = 0
LINEAR = 1
LOG10 = 2
label = proto.Field(proto.STRING, number=1)
scale = proto.Field(proto.ENUM, number=2,
enum='XyChart.Axis.Scale',
)
data_sets = proto.RepeatedField(proto.MESSAGE, number=1,
message=DataSet,
)
timeshift_duration = proto.Field(proto.MESSAGE, number=4,
message=duration.Duration,
)
thresholds = proto.RepeatedField(proto.MESSAGE, number=5,
message=metrics.Threshold,
)
x_axis = proto.Field(proto.MESSAGE, number=6,
message=Axis,
)
y_axis = proto.Field(proto.MESSAGE, number=7,
message=Axis,
)
chart_options = proto.Field(proto.MESSAGE, number=8,
message='ChartOptions',
)
class ChartOptions(proto.Message):
r"""Options to control visual rendering of a chart.
Attributes:
mode (google.monitoring.dashboard_v1.types.ChartOptions.Mode):
The chart mode.
"""
class Mode(proto.Enum):
r"""Chart mode options."""
MODE_UNSPECIFIED = 0
COLOR = 1
X_RAY = 2
STATS = 3
mode = proto.Field(proto.ENUM, number=1,
enum=Mode,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
a132f26dbd5f323274528482518a0db067ccaee7
|
24cce1ec7737f9ebb6df3e317a36c0a0329ec664
|
/HZMX/amazon_api/wizard/__init__.py
|
ec88e0a602a1466bc35254ab81c4e14fb0155649
|
[] |
no_license
|
tate11/HangZhouMinXing
|
ab261cb347f317f9bc4a77a145797745e2531029
|
14b7d34af635db015bd3f2c139be1ae6562792f9
|
refs/heads/master
| 2021-04-12T04:23:20.165503 | 2018-03-14T05:02:05 | 2018-03-14T05:02:05 | 125,855,729 | 1 | 0 | null | 2018-03-19T12:42:07 | 2018-03-19T12:42:07 | null |
UTF-8
|
Python
| false | false | 183 |
py
|
# -*- coding: utf-8 -*-
from . import amazon_wizard
from . import shop_template_wizard
from . import sync_sale_order
from . import stock_adjust
from . import stock_immediate_transfer
|
[
"1121403085"
] |
1121403085
|
c6f5f3941b86fc9197ffda49361b9e893dd4af5d
|
d87483a2c0b50ed97c1515d49d62c6e9feaddbe0
|
/.history/get_positions_20210205015710.py
|
71c1dcc17b16ec8ec382841ca4d884e1bb8d3c0f
|
[
"MIT"
] |
permissive
|
HopperKremer/hoptrader
|
0d36b6e33922414003cf689fb81f924da076a54b
|
406793c10bc888648290fd15c7c2af62cf8c6c67
|
refs/heads/main
| 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,169 |
py
|
# Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)
# positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
# account_info = c.get_account(config.tda_acct_num, fields=[c.Account.Fields.POSITIONS]).json()
# print(account_info)
# positions = c.Account.Fields.POSITIONS
# r = c.get_account(config.tda_acct_num, fields=positions)
# stocks = r.json()['securitiesAccount']['positions']
# # stocks = json.dumps(r.json(), indent=4)
# for stock in stocks:
# print('--------------------------------')
# print(stock['instrument']['symbol'])
orders = c.Order.Status.QUEUED
res = c.get_orders_by_path(config.tda_acct_num, status != orders)
data = res.json()
print(data)
|
[
"[email protected]"
] | |
ad9989eac00b34fb5d4c74cacffaf49e07c379a3
|
042bd40e554ac7fcd618c334ae98b4f43248a250
|
/examples/python/gpu/tensors/ocean_cast_01.py
|
ce97706ce32728f5fa42af5c74d567a917e63ef7
|
[
"Apache-2.0"
] |
permissive
|
kant/ocean-tensor-package
|
8a62df968335de2057ff095f0910e5ad5fcff8e1
|
fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d
|
refs/heads/master
| 2020-03-29T04:01:22.064480 | 2018-09-19T19:17:19 | 2018-09-19T19:17:19 | 149,511,923 | 0 | 0 |
Apache-2.0
| 2018-09-19T21:03:14 | 2018-09-19T21:03:14 | null |
UTF-8
|
Python
| false | false | 388 |
py
|
import ocean
A = ocean.asTensor([1,2,3])
B = A.storage
C = ocean.int8(10)
print(ocean.gpu[0](A))
print(ocean.ensure(A,ocean.float,ocean.gpu[0]))
ocean.ensure(A,ocean.half,ocean.gpu[0],True)
print(A)
print(ocean.gpu[0](B))
print(ocean.ensure(B,ocean.int8,ocean.gpu[0]))
ocean.ensure(B,ocean.gpu[0],True)
print(B)
print(ocean.gpu[0](C))
print(ocean.ensure(C,ocean.int16,ocean.cpu))
|
[
"[email protected]"
] | |
a2b91eceee4b8605757728c8196874fbfb1c1d05
|
71469cb9d9dd41438373be83c1e43b67bca25649
|
/tests/test__util.py
|
5f8dd6d9d82c8331fe015eef296620530e1e28c4
|
[
"MIT"
] |
permissive
|
achillesrasquinha/honcho
|
7494042775f205b5c0690676856a49185f4ef5d1
|
aab83cb10b1d4832c82a4dd3661a6b6df1e1e021
|
refs/heads/master
| 2020-03-26T10:00:02.351236 | 2018-08-17T18:37:34 | 2018-08-17T18:37:34 | 144,776,468 | 0 | 0 |
MIT
| 2018-08-14T22:03:08 | 2018-08-14T22:03:08 | null |
UTF-8
|
Python
| false | false | 397 |
py
|
from honcho._util import (
_get_if_empty,
_listify
)
def test__get_if_empty():
assert _get_if_empty("foo", "bar") == "foo"
assert _get_if_empty(None, "bar") == "bar"
def test__listify():
assert _listify("foo") == ["foo"]
assert _listify(12345) == [12345]
assert _listify(["foo"]) == ["foo"]
assert _listify([[]]) == [[]]
assert _listify([]) == []
|
[
"[email protected]"
] | |
ce9cacb177ce5e5ab233a69cca7469454d7a56e2
|
61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5
|
/evennumberedexercise/Exercise10_2.py
|
484e16240b641b4684b13211c0c81be57f8d9814
|
[] |
no_license
|
bong1915016/Introduction-to-Programming-Using-Python
|
d442d2252d13b731f6cd9c6356032e8b90aba9a1
|
f23e19963183aba83d96d9d8a9af5690771b62c2
|
refs/heads/master
| 2020-09-25T03:09:34.384693 | 2019-11-28T17:33:28 | 2019-11-28T17:33:28 | 225,904,132 | 1 | 0 | null | 2019-12-04T15:56:55 | 2019-12-04T15:56:54 | null |
UTF-8
|
Python
| false | false | 303 |
py
|
def main():
# Read numbers as a string from the console
s = input("Enter numbers separated by spaces from one line: ")
items = s.split() # Extracts items from the string
numbers = [ eval(x) for x in items ] # Convert items to numbers
numbers.reverse()
print(numbers)
main()
|
[
"[email protected]"
] | |
6f52e25526d91b14a03debb468ee2df71da8d084
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_Loctet_exo3_2.py
|
bf23d8a702249cdbeb9c90068dad392df81c129c
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 943 |
py
|
def flip(s):
s=s[::-1]
final=""
for i in s:
if i=="-":
final+="+"
else:
final+="-"
return final
def lastOccur(s):
l=len(s)
l-=1
while l>=0:
if s[l]=="-":
return l
l-=1
return -1
input = open("B-large.in","r")
output = open("output2.txt","w")
T = input.readline()
T = int(T)
for i in range(T):
s=input.readline()
ind = lastOccur(s)
nb=0
while ind != -1:
nb+=1
st = s[0:(ind+1)]
tmp=""
f=ind
while st[0]!=st[f] and f>0:
tmp+=st[f]
f-=1
if tmp!="":
stk = st[0:f+1]
stF = flip(stk)
s=stF+tmp
ind = lastOccur(s)
else:
stF=flip(st)
ind=lastOccur(stF)
s = stF[0:ind+1]
output.write("Case #{0}: {1}\n".format(i+1,nb))
|
[
"[[email protected]]"
] | |
e999a16e6adfdf17446ba8992e64289a3804c4f2
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/125_get_most_recommended_books/save2_nopass.py
|
797bdd2662c2b1d825c4e4a7e7ca198722722ac8
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,283 |
py
|
from collections import Counter
from bs4 import BeautifulSoup
import requests
AMAZON = "amazon.com"
# static copy
TIM_BLOG = ('https://bites-data.s3.us-east-2.amazonaws.com/'
'tribe-mentors-books.html')
MIN_COUNT = 3
def load_page():
"""Download the blog html and return its decoded content"""
with requests.Session() as session:
return session.get(TIM_BLOG).content.decode('utf-8')
def get_top_books(content=None):
"""Make a BeautifulSoup object loading in content,
find all links that contain AMAZON, extract the book title
(stripping spacing characters), and count them.
Return a list of (title, count) tuples where
count is at least MIN_COUNT
"""
if content is None:
content = load_page()
soup = BeautifulSoup(content, 'html.parser')
right_table = soup.find('div', {'class': 'entry-content'})
books = [row.text
for row in right_table.select('a[href*=amazon]')]
c = Counter(books)
books_final = []
count = []
for letter in c:
if c[letter] >= MIN_COUNT:
books_final.append(letter.strip())
count.append(c[letter])
return sorted(list(zip(books_final, count)),
key=lambda tup: tup[1], reverse=True)
|
[
"[email protected]"
] | |
6633aaf841c66798b7f26e53ce98722ba6c11f37
|
086c199b617f304f5edcbb3481a82119b9cec99d
|
/build/turtlebot3_simulations/turtlebot3_gazebo/catkin_generated/pkg.installspace.context.pc.py
|
71b5c21b7cabd6268333c28e49d1b4f0f942a34c
|
[] |
no_license
|
canveo/catkin_ws
|
59634bee66aa9f5ed593acd85e6bd4e2e2eaab01
|
3c931df576529ad7e1e48dc0e69ba131914d18dc
|
refs/heads/master
| 2023-02-28T13:23:43.653964 | 2021-02-03T23:00:45 | 2021-02-03T23:00:45 | 325,388,539 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 479 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;nav_msgs;tf;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_gazebo"
PROJECT_SPACE_DIR = "/home/canveo/catkin_ws/install"
PROJECT_VERSION = "1.3.0"
|
[
"[email protected]"
] | |
0f1fe0d7b7427e43223e7a9c0c4c64f6116a45f0
|
caf8cbcafd448a301997770165b323438d119f5e
|
/.history/spider/car_spider_20201124011404.py
|
ac405951f74aa797df77660aae074658fec4ce27
|
[
"MIT"
] |
permissive
|
KustomApe/nerdape
|
03e0691f675f13ce2aefa46ee230111247e90c72
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
refs/heads/main
| 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,314 |
py
|
from selenium import webdriver
import pandas as pd
import time
"""[Initial Setting]
初期設定
"""
options = webdriver.ChromeOptions()
options.add_argument('--headeless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
df = pd.DataFrame(columns=['name', 'image', 'price', 'category', 'car'])
url = 'https://motorz-garage.com/parts/'
"""[CSS Selector Setting]
CSSセレクターの設定
"""
PAGER_NEXT = "li.select-page.arrow a[rel='next']"
POSTS = ".product-item-list__item"
PRODUCT_NAME = ".product-item-list__item-name"
IMAGE = ".product-item-list__item-image img"
PRICE = ".product-item-list__item-price"
CATEGORY = ".product-item-list__item-category"
CAR = ".product-item-list__item-car-name"
"""[Activate Section]
実行部分
"""
browser.get(url)
while True: #Continue until getting the last page.
if len(browser.find_elements_by_css_selector(PAGER_NEXT)) > 0:
print('Starting to get posts...')
posts = browser.find_elements_by_css_selector(POSTS)
print(len(posts))
for post in posts:
try:
name = post.find_element_by_css_selector(PRODUCT_NAME).text
print(name)
thumbnailURL = post.find_element_by_css_selector(IMAGE).get_attribute('src')
print(thumbnailURL)
price = post.find_element_by_css_selector(PRICE).text
print(price)
category = post.find_element_by_css_selector(CATEGORY).text
print(category)
car = post.find_element_by_css_selector(CAR).text
print(car)
se = pd.Series([name, thumbnailURL, price, category, car], ['name', 'image', 'price', 'category', 'car'])
df.append(se, ignore_index=True)
except Exception as e:
print(e)
break
btn = browser.find_element_by_css_selector(PAGER_NEXT).get_attribute('href')
print('next url:{}'.format(btn))
time.sleep(3)
browser.get(btn)
print('Moving to next page.')
else:
print('No pager exist anymore...')
break
print('Finished Crawling. Writing out to CSV file...')
df.to_csv('car_parts.csv')
print('Done')
|
[
"[email protected]"
] | |
9bf5f9186fe2f542ae87d34473bbfe712d6079a5
|
df816f41be8f02107f08b7651e9397b1c905c154
|
/1_Classic_RL/Exercises/6 - TD Learning/CliffWalking/plot_utils.py
|
8d5b55b91b46530e339d7d36f6b5878fe45467bf
|
[] |
no_license
|
PabloRR100/Reinforcement-Learning
|
7f11caeb2eb2bc68b2ae6b2b3bc7fb4b651eae68
|
8d926cdae59f89a215391ca825d9f07f778dbd96
|
refs/heads/master
| 2020-03-27T18:39:42.969119 | 2018-12-17T19:01:36 | 2018-12-17T19:01:36 | 146,935,269 | 1 | 2 | null | 2018-09-07T23:37:01 | 2018-08-31T19:36:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 620 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
import warnings
warnings.filterwarnings('ignore', 'DeprecationWarning')
def plot_values(V):
# reshape the state-value function
V = np.reshape(V, (4,12))
# plot the state-value function
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
im = ax.imshow(V, cmap='cool')
for (j,i),label in np.ndenumerate(V):
ax.text(i, j, np.round(label,3), ha='center', va='center', fontsize=14)
plt.tick_params(bottom='off', left='off', labelbottom='off', labelleft='off')
plt.title('State-Value Function')
plt.show()
|
[
"[email protected]"
] | |
6cc279aca08425752f46c34dd433ac0d8c904369
|
94c8f0b09ced7ae86fba0d09faf4310e508c18e5
|
/scaler/dp2/dp4/largest_rectangle.py
|
ab95e9ec10944ed6e47c17d22cc8956dfee29a56
|
[] |
no_license
|
navkant/ds_algo_practice
|
6e7dd427df6ac403ac23fa68b079b162b839447a
|
a2b762d08b151f6dbbc12d76dd930f6cd7b9017d
|
refs/heads/master
| 2023-06-24T02:56:25.886991 | 2021-06-13T03:42:24 | 2021-06-13T03:42:24 | 376,431,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,061 |
py
|
import sys
class LargestRectangle:
def nearest_minimum_left(self, a):
n = len(a)
ans_arr = []
stack = []
for i in range(n):
if not stack:
ans_arr.append(-1)
stack.append(i)
else:
if a[i] > a[stack[-1]]:
ans_arr.append(stack[-1])
stack.append(i)
else:
while stack and a[i] <= a[stack[-1]]:
stack.pop()
if not stack:
ans_arr.append(-1)
else:
ans_arr.append(stack[-1])
stack.append(i)
return ans_arr
def nearest_minimum_right(self, a):
n = len(a)
ans_arr = []
stack = []
for i in range(n-1, -1, -1):
if not stack:
ans_arr.append(-1)
stack.append(i)
else:
if a[i] > a[stack[-1]]:
ans_arr.append(stack[-1])
stack.append(i)
else:
while stack and a[i] <= a[stack[-1]]:
stack.pop()
if not stack:
ans_arr.append(-1)
else:
ans_arr.append(stack[-1])
stack.append(i)
return ans_arr[::-1]
def largest_rectangle(self, a):
n = len(a)
left_mins = self.nearest_minimum_left(a)
right_mins = self.nearest_minimum_right(a)
# print(a)
# print(left_mins)
# print(a)
# print(right_mins)
max_area = 0
for i in range(len(a)):
left_min = left_mins[i]
right_min = right_mins[i]
height = a[i]
if left_min == -1 and right_min == -1:
width = n
elif right_min == -1:
width = n - left_min - 1
elif left_min == -1:
width = right_min
else:
width = right_min - left_min - 1
area = height * width
# print(area, end=' ')
max_area = max(max_area, area)
return max_area
class Solution:
# @param A : list of list of integers
# @return an integer
def maximalRectangle(self, A):
n = len(A[0])
m = len(A)
for i in range(1, m):
for j in range(n):
if A[i][j] == 0:
continue
A[i][j] = A[i][j] + A[i-1][j]
maxx_area = sys.maxsize * -1
obj = LargestRectangle()
for row in A:
current_area = obj.largest_rectangle(row)
maxx_area = max(maxx_area, current_area)
return maxx_area
if __name__ == '__main__':
    # Smoke test on a 5x7 binary matrix.
    # NOTE(review): `ans` is computed but never printed — add a print(ans)
    # to actually observe the result.
    a = [[1, 1, 1, 1, 0, 1, 1],
         [0, 1, 1, 1, 1, 1, 0],
         [1, 0, 0, 1, 1, 1, 1],
         [1, 0, 0, 1, 1, 1, 1],
         [1, 0, 0, 1, 1, 1, 1]]
    obj = Solution()
    ans = obj.maximalRectangle(a)
|
[
"[email protected]"
] | |
fd5d20da9b0ffd715c0a27df62cb9aa1293849d8
|
1eb7fa8b1745d4e51cefb4eceb44621862516aa6
|
/Company Interview/FB/regularExpressionMatching.py
|
7c2ad7787f3d25cd13d93fb3a68ba0ddc93ad340
|
[] |
no_license
|
geniousisme/CodingInterview
|
bd93961d728f1fe266ad5edf91adc5d024e5ca48
|
a64bca9c07a7be8d4060c4b96e89d8d429a7f1a3
|
refs/heads/master
| 2021-01-10T11:15:31.305787 | 2017-03-06T00:03:13 | 2017-03-06T00:03:13 | 43,990,453 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,401 |
py
|
class Solution(object):
    def isMatch(self, string, pattern):
        """Return True iff `pattern` matches all of `string`.

        Supports '.' (any single character) and '*' (zero or more of the
        preceding element), with full backtracking.
        """
        if not pattern:
            return not string
        if len(pattern) == 1 or pattern[1] != "*":
            # No '*' follows pattern[0]: it must consume exactly one char.
            if len(string) > 0 and (pattern[0] == string[0] or pattern[0] == '.'):
                return self.isMatch(string[1:], pattern[1:])
            # Bug fix: previously this branch fell through and implicitly
            # returned None instead of False.
            return False
        else:
            # pattern[0] is starred: try consuming 0, 1, 2, ... leading
            # matching characters, backtracking through pattern[2:].
            while len(string) > 0 and (pattern[0] == string[0] or pattern[0] == '.'):
                if self.isMatch(string, pattern[2:]):
                    return True
                string = string[1:]
            return self.isMatch(string, pattern[2:])
class Solution(object):
    def isMatch(self, string, pattern):
        """Regex matching ('.' and '*') via bottom-up DP.  Python 2 (xrange).

        dp[i][j] is True iff string[:i] matches pattern[:j].
        """
        dp = [[False for _ in xrange(len(pattern) + 1)] for _ in xrange(len(string) + 1)]
        dp[0][0] = True
        # Empty string: only "x*" pairs can vanish, two pattern chars at a time.
        for j in xrange(2, len(pattern) + 1):
            if pattern[j - 1] == "*":
                dp[0][j] = dp[0][j - 2]
        for i in xrange(1, len(string) + 1):
            for j in xrange(1, len(pattern) + 1):
                if pattern[j - 1] == ".":
                    dp[i][j] = dp[i - 1][j - 1]
                elif pattern[j - 1] == "*":
                    # One occurrence (j-1), zero occurrences (j-2), or
                    # extend a repeat of pattern[j-2] by string[i-1].
                    dp[i][j] = dp[i][j - 1] or dp[i][j - 2] or (dp[i - 1][j] and (string[i - 1] == pattern[j - 2] or pattern[j - 2] == '.'))
                else:
                    dp[i][j] = dp[i - 1][j - 1] and string[i - 1] == pattern[j - 1]
        return dp[-1][-1]
|
[
"[email protected]"
] | |
6b6d42eb3d030728790f8497fe392eada6aed1ca
|
7f4fae8e0a9e29fc3abee784a2d7d0beb8252bd5
|
/wulifang/nuke/_rotopaint_dopesheet.py
|
23d06b434f378fa9e35ffc0575925e1cbec8a39d
|
[] |
no_license
|
WuLiFang/Nuke
|
a303646e927c9745f2eaf8dad4e5e1ccc09a30e7
|
49df48ded0985771147b1a40707b5454291eab19
|
refs/heads/master
| 2023-07-21T13:36:27.423572 | 2023-07-17T10:34:04 | 2023-07-17T10:34:04 | 100,696,180 | 16 | 5 | null | 2020-03-08T11:50:16 | 2017-08-18T09:28:26 |
Python
|
UTF-8
|
Python
| false | false | 5,497 |
py
|
# -*- coding=UTF-8 -*-
# pyright: strict, reportTypeCommentUsage=none
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import nuke
import nuke.rotopaint
import nuke.curvelib
from wulifang._util import cast_str, cast_text
from wulifang.nuke._util import (
iter_deep_rotopaint_element,
Panel as _Panel,
CurrentViewer,
raise_panel,
RotopaintLifeTimeType,
knob_of,
RotoKnob,
)
TYPE_CHECKING = False
if TYPE_CHECKING:
from wulifang._compat.str import Str
def _rotopaint_keyframes(n):
    # type: (nuke.Node) -> ...
    """Return sorted frames where stroke/shape life times change on `n`.

    Always seeds the node's first and last frame.  Elements whose
    life-time type is ALL contribute no extra frames.
    """
    key_frames = set([n.firstFrame(), n.lastFrame()])
    for i in iter_deep_rotopaint_element(knob_of(n, "curves", RotoKnob).rootLayer):
        if isinstance(
            i,
            (
                nuke.rotopaint.Shape,
                nuke.rotopaint.Stroke,
            ),
        ):
            attrs = i.getAttributes()
            lifetime_type = attrs.getValue(0, attrs.kLifeTimeTypeAttribute)
            if lifetime_type == RotopaintLifeTimeType.ALL:
                continue
            # kLifeTimeM/N appear to be the life-time bounds of the
            # element — confirm against the Nuke rotopaint docs.
            key_frames.add(int(attrs.getValue(0, attrs.kLifeTimeMAttribute)))
            key_frames.add(int(attrs.getValue(0, attrs.kLifeTimeNAttribute)))
    return sorted(key_frames)
def apply_timewarp(rotopaint, timewarp, all_stroke=False):
    # type: (nuke.Node, nuke.Node, bool) -> None
    """Apply timewarp to rotopaint node

    Remaps every shape/stroke life-time frame of `rotopaint` through the
    TimeWarp's lookup curve.

    Args:
        rotopaint (nuke.Node): RotoPaint node
        timewarp (nuke.Node): TimeWarp node
        all_stroke (bool, optional): whether apply to invisible stroke.
            Defaults to False.

    Raises:
        ValueError: when a life-time frame has no key on the lookup curve.
    """
    root_layer = knob_of(rotopaint, "curves", RotoKnob).rootLayer
    lookup = timewarp[cast_str("lookup")]
    # Parse "x<frame> <value>" pairs out of the curve script into a
    # mapping of curve value (source frame) -> curve key (warped frame).
    time_map = {
        int(match[1]): int(match[0])
        for match in re.findall(
            r"x(\d+) (\d+)",
            cast_text(lookup.toScript()),
        )
    }

    def apply_lookup(attrs, key):
        # type: (nuke.curvelib.AnimAttributes, Str) -> None
        # Remap one life-time attribute through the lookup table.  A
        # missing key means the curve no longer has a keyframe for that
        # source frame; surface it to the user and abort.
        input_time = int(attrs.getValue(0, key))
        if input_time not in time_map:
            nuke.message(
                cast_str(
                    "在 {}.input 中找不到值为 {} 的关键帧".format(timewarp.name(), input_time)
                )
            )
            raise ValueError("timewarp lookup failed")
        output_time = time_map[int(input_time)]
        attrs.set(key, output_time)

    for i in iter_deep_rotopaint_element(root_layer):
        if isinstance(
            i,
            (
                nuke.rotopaint.Shape,
                nuke.rotopaint.Stroke,
            ),
        ):
            attrs = i.getAttributes()
            lifetime_type = attrs.getValue(0, attrs.kLifeTimeTypeAttribute)
            if lifetime_type == RotopaintLifeTimeType.ALL:
                continue
            # Skip strokes not visible at the current frame unless forced.
            if not all_stroke and not attrs.getValue(
                nuke.frame(), attrs.kVisibleAttribute
            ):
                continue
            apply_lookup(attrs, attrs.kLifeTimeNAttribute)
            apply_lookup(attrs, attrs.kLifeTimeMAttribute)
class Panel(_Panel):
    """Panel for rotopaint dopesheet command.

    Creates a helper TimeWarp downstream of the selected RotoPaint so the
    user can retime stroke life times by editing its lookup curve in the
    dope sheet, then applies or cancels the retime.
    """

    def __init__(
        self,
        rotopaint,  # type: nuke.Node
    ):
        # type: (...) -> None
        super(Panel, self).__init__(
            cast_str("RotoPaint摄影表"),
            cast_str("com.wlf-studio.rotopaint-dopesheet"),
        )
        if cast_text(rotopaint.Class()) != "RotoPaint":
            nuke.message(cast_str("请选中RotoPaint节点"))
            raise ValueError("require roto paint node")
        self.rotopaint = rotopaint
        # Wire a TimeWarp after the RotoPaint, seeded with an identity
        # lookup key ("x<f> <f>") at every life-time keyframe.
        n = nuke.createNode(cast_str("TimeWarp"))
        n.setInput(0, rotopaint)
        k = knob_of(n, "lookup", nuke.Array_Knob)
        k.fromScript(
            cast_str(
                "{curve L l %s}"
                % (
                    " ".join(
                        "x{} {}".format(i, i) for i in _rotopaint_keyframes(rotopaint)
                    ),
                )
            )
        )
        # Keep curve values integral while the user edits them.
        k.setExpression(cast_str("floor(curve)"))
        n.showControlPanel()
        CurrentViewer.show(n)
        self.timewarp = n
        rotopaint.hideControlPanel()
        # Instruction text plus apply / apply-all / cancel buttons.
        k = nuke.Text_Knob(
            cast_str(""),
            cast_str("说明"),
            cast_str(
                "请在摄影表中编辑 %s.lookup 然后选择以下操作" % (cast_text(self.timewarp.name()),)
            ),
        )
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("apply"), cast_str("应用至可见笔画"))
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("apply_all"), cast_str("应用至所有笔画"))
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("cancel"), cast_str("Cancel"))
        self.addKnob(k)

    def show(self):
        # Bring the dope sheet forward so the lookup curve is editable.
        super(Panel, self).show()
        raise_panel("DopeSheet.1")

    def knobChanged(self, knob):
        # type: (nuke.Knob) -> None
        is_finished = False
        if knob is self["apply"]:
            apply_timewarp(self.rotopaint, self.timewarp)
            is_finished = True
        elif knob is self["apply_all"]:
            apply_timewarp(self.rotopaint, self.timewarp, True)
            is_finished = True
        elif knob is self["cancel"]:
            is_finished = True
        if is_finished:
            # Tear down the helper TimeWarp and restore the RotoPaint UI.
            nuke.delete(self.timewarp)
            self.rotopaint.showControlPanel()
            self.destroy()
            raise_panel("DAG.1")
|
[
"[email protected]"
] | |
1ef74bbd7592936fff5d016713ea948ded5646f2
|
af4d559792c4255d5f26bc078cd176b70c0e643f
|
/hpsklearn/components/cross_decomposition/_pls.py
|
bdad86189563c644c43df8232c49ae1312194937
|
[
"BSD-3-Clause"
] |
permissive
|
hyperopt/hyperopt-sklearn
|
ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce
|
4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91
|
refs/heads/master
| 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 |
NOASSERTION
| 2022-12-15T17:53:08 | 2013-02-19T16:09:53 |
Python
|
UTF-8
|
Python
| false | false | 3,586 |
py
|
import typing
from hpsklearn.components._base import validate
from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import cross_decomposition
import numpy as np
@scope.define
def sklearn_CCA(*args, **kwargs):
    """pyll-scope wrapper constructing sklearn's CCA estimator."""
    return cross_decomposition.CCA(*args, **kwargs)
@scope.define
def sklearn_PLSCanonical(*args, **kwargs):
    """pyll-scope wrapper constructing sklearn's PLSCanonical estimator."""
    return cross_decomposition.PLSCanonical(*args, **kwargs)
@scope.define
def sklearn_PLSRegression(*args, **kwargs):
    """pyll-scope wrapper constructing sklearn's PLSRegression estimator."""
    return cross_decomposition.PLSRegression(*args, **kwargs)
def _pls_n_components(name: str):
    """
    Declaration search space 'n_components' parameter:
    choose between keeping 1 or 2 components.
    """
    return hp.choice(name, [1, 2])
def _pls_max_iter(name: str):
    """
    Declaration search space 'max_iter' parameter:
    integer drawn uniformly from [350, 650].
    """
    return scope.int(hp.uniform(name, 350, 650))
def _pls_tol(name: str):
    """
    Declaration search space 'tol' parameter:
    log-uniform over [1e-7, 1e-5].
    """
    return hp.loguniform(name, np.log(1e-7), np.log(1e-5))
def _pls_hp_space(
    name_func,
    n_components: typing.Union[int, Apply] = None,
    scale: bool = True,
    max_iter: typing.Union[int, Apply] = None,
    tol: typing.Union[float, Apply] = None,
    copy: bool = True
):
    """
    Hyper parameter search space for
     cca
     pls canonical
     pls regression

    Any parameter left as None falls back to its default search space;
    `name_func` generates the unique hyperopt label for each parameter.
    """
    hp_space = dict(
        n_components=_pls_n_components(name_func("n_components")) if n_components is None else n_components,
        scale=scale,
        max_iter=_pls_max_iter(name_func("max_iter")) if max_iter is None else max_iter,
        tol=_pls_tol(name_func("tol")) if tol is None else tol,
        copy=copy
    )
    return hp_space
def cca(name: str, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.CCA model.

    Args:
        name: name | str

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        # Prefix every hyperparameter label with this model's name.
        return f"{name}.cca_{msg}"

    hp_space = _pls_hp_space(_name, **kwargs)
    return scope.sklearn_CCA(**hp_space)
@validate(params=["algorithm"],
          validation_test=lambda param: not isinstance(param, str) or param in ["nipals", "svd"],
          msg="Invalid parameter '%s' with value '%s'. Value must be in ['nipals', 'svd'].")
def pls_canonical(name: str, algorithm: typing.Union[str, Apply] = None, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.PLSCanonical model.

    Args:
        name: name | str
        algorithm: algorithm for first singular vectors | str
            (searched over ['nipals', 'svd'] when None)

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        # Prefix every hyperparameter label with this model's name.
        return f"{name}.pls_canonical_{msg}"

    hp_space = _pls_hp_space(_name, **kwargs)
    hp_space["algorithm"] = hp.choice(_name("algorithm"), ["nipals", "svd"]) if algorithm is None else algorithm
    return scope.sklearn_PLSCanonical(**hp_space)
def pls_regression(name: str, **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cross_decomposition.PLSRegression model.

    Args:
        name: name | str

    See help(hpsklearn.components.cross_decomposition._pls._pls_hp_space)
    for info on additional available pls arguments.
    """
    def _name(msg):
        # Prefix every hyperparameter label with this model's name.
        return f"{name}.pls_regression_{msg}"

    hp_space = _pls_hp_space(_name, **kwargs)
    return scope.sklearn_PLSRegression(**hp_space)
|
[
"[email protected]"
] | |
6be9a2f554cfe414a831a4e52764e37f7205a2d7
|
53b9432487fbb62a5f48d37754d0404e2672a0f7
|
/facebookspider-master2/facebookspider/redyurl.py
|
038eeda369fb5b503d2adbd739ca578de300743c
|
[] |
no_license
|
swg0110/facebook_spider
|
ff7c3caeb76fccd3bbc2b23b586a2b5825a77598
|
a5e3b12d56cb2759942b486b7f4da3b94dfa6839
|
refs/heads/master
| 2021-09-17T18:33:09.636336 | 2018-07-04T11:53:14 | 2018-07-04T11:53:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
#-*- coding: UTF-8 -*-
# Copy every profile's friends list (key '全部好友', "all friends") from the
# `facebook2` collection into `singaporeredyurl`, de-duplicated on URL.
# Python 2 only: relies on str.decode() on a str literal.
import pymongo
import time
from datetime import datetime
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
# NOTE(review): database credentials are hard-coded — move to config/env.
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client["singapore"]
db.authenticate("yufei", "xjtu@2017")
coll = db["facebook2"]
a = coll.find()
print (a.count())
coll = db["singaporeredyurl"]
# Unique index on url: duplicate inserts raise and are caught below.
coll.ensure_index('url', unique=True)
str1 = '全部好友'
for i in a:
    try:
        for j in i['friendsList'][str1.decode()]:
            # NOTE(review): rebinding `a` shadows the cursor being
            # iterated (iteration keeps working, but it reads oddly).
            a = {}
            a['name'] = j['name']
            a['picture'] = j['picture']
            a['url'] = j['url']
            a['time'] = i['time']
            a['sign'] = 'f'
            try:
                coll.insert(a)
            except:
                print ('重复')
                pass
    except:
        print ('出错')
        print (i['friendsList'])
|
[
"[email protected]"
] | |
fb9fff047ea9f91c6306fde600390b8cc180df7f
|
ebd6f68d47e192da7f81c528312358cfe8052c8d
|
/swig/Examples/test-suite/python/cpp11_uniform_initialization_runme.py
|
ecb468ccbab5774868fae2adf8e1162f13d56457
|
[
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
inishchith/DeepSpeech
|
965ad34d69eb4d150ddf996d30d02a1b29c97d25
|
dcb7c716bc794d7690d96ed40179ed1996968a41
|
refs/heads/master
| 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 |
Apache-2.0
| 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null |
UTF-8
|
Python
| false | false | 554 |
py
|
# Runtime checks for SWIG's C++11 uniform-initialization wrappers.
# Python 2 only: uses the `raise Exception, value` statement form.
import cpp11_uniform_initialization

# Globals initialized with brace syntax on the C++ side.
var1 = cpp11_uniform_initialization.cvar.var1
if var1.x != 5:
    raise RuntimeError
var2 = cpp11_uniform_initialization.cvar.var2
if var2.getX() != 2:
    raise RuntimeError

# Member defaults and overloads taking brace-initialized arguments.
m = cpp11_uniform_initialization.MoreInit()
if m.charptr != None:
    raise RuntimeError, m.charptr
m.charptr = "hello sir"
if m.charptr != "hello sir":
    raise RuntimeError, m.charptr
if m.more1(m.vi) != 15:
    raise RuntimeError, m.vi
if m.more1([-1, 1, 2]) != 2:
    raise RuntimeError, m.vi
if m.more1() != 10:
    raise RuntimeError
|
[
"[email protected]"
] | |
5aad7e0cb8a7d3330496ab6719606b80e1ce9362
|
d963fb56dbb92cc7317c0a042c9059239ebaa028
|
/problems/LC31.py
|
f1c1a4832845498896b69b66f880c332d2a9f312
|
[] |
no_license
|
ClaudioCarvalhoo/you-can-accomplish-anything-with-just-enough-determination-and-a-little-bit-of-luck
|
20572bde5482ddef379506ce298c21dd5e002492
|
df287ed92a911de49ed4bc7ca5a997d18a96c3f6
|
refs/heads/master
| 2023-06-02T16:27:57.994351 | 2021-06-24T23:16:47 | 2021-06-24T23:16:47 | 284,845,707 | 1 | 0 | null | 2020-10-29T21:35:30 | 2020-08-04T01:24:23 |
Python
|
UTF-8
|
Python
| false | false | 958 |
py
|
# O(n)
# n = len(nums)
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange `nums` into its next lexicographic permutation, in place.

        If `nums` is already the highest permutation, wrap around to the
        lowest (sorted ascending).  Runs in O(n) with O(1) extra space.
        """
        pivot = self._last_ascent(nums)
        if pivot is None:
            # Entirely non-increasing: wrap around to the smallest order.
            self._reverse_tail(nums, 0)
            return
        # Rightmost element after the pivot that is the smallest value
        # still greater than nums[pivot].
        successor = pivot + 1
        for k in range(pivot + 2, len(nums)):
            if nums[pivot] < nums[k] <= nums[successor]:
                successor = k
        nums[pivot], nums[successor] = nums[successor], nums[pivot]
        self._reverse_tail(nums, pivot + 1)

    def _last_ascent(self, nums):
        """Index i of the rightmost pair with nums[i] < nums[i+1], else None."""
        for i in range(len(nums) - 2, -1, -1):
            if nums[i] < nums[i + 1]:
                return i
        return None

    def _reverse_tail(self, nums, start):
        """Reverse nums[start:] in place."""
        lo, hi = start, len(nums) - 1
        while lo < hi:
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1
|
[
"[email protected]"
] | |
f901eff5d7fb9faf694cb612fd1d8cc57d32500f
|
9dc423fe2c14e3949a171b81da9d02f87b1c2063
|
/day06/02_SQLAlchmey.py
|
c51e800634f4b59002e9eb4fdc5cfc37bf0b76a7
|
[] |
no_license
|
1751660300/Flask
|
a09ca944f21070cc04116d5fb929cacf386e56cd
|
9fbf6955649f0c5e2e7acd98b29e28ebfdb99cd7
|
refs/heads/master
| 2022-11-09T13:10:45.462516 | 2020-06-28T02:47:19 | 2020-06-28T02:47:19 | 271,776,011 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,050 |
py
|
# -*- coding:utf-8 -*-
"""
SQLAlchemy notes: https://www.cnblogs.com/wupeiqi/articles/8259356.html
1. SQLAlchemy is an ORM framework for Python.
Goal: operations on classes/objects -> SQL statements (executed via the
pymysql driver) -> operations on the database.
"""
import time
import threading
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine

# NOTE(review): DB credentials are hard-coded — move to config/env.
engine = create_engine(
    "mysql+pymysql://root:[email protected]:3306/t1?charset=utf8",
    max_overflow=0,  # max connections allowed beyond the pool size
    pool_size=5,  # connection pool size
    pool_timeout=30,  # max wait for a free connection, else raise
    pool_recycle=-1  # seconds before a pooled connection is recycled (reset)
)


def task(arg):
    # Borrow a raw DBAPI connection from the pool, run one query,
    # and return the connection to the pool.
    conn = engine.raw_connection()
    cursor = conn.cursor()
    cursor.execute(
        "select * from t1"
    )
    result = cursor.fetchall()
    cursor.close()
    conn.close()


# Launch 20 threads to exercise the pool limits configured above.
for i in range(20):
    t = threading.Thread(target=task, args=(i,))
    t.start()
|
[
"[email protected]"
] | |
7397afa12e4a2262330657cc6cba2765d23f42be
|
3135f67392febe5f85a743d1545d00bac40beb5a
|
/cn_stock_holidays/cn_stock_holidays/gateway/__init__.py
|
657b3d4964a852cdccfb0de7f59f08ca7fa443ff
|
[] |
no_license
|
xiyongjian/gateway
|
b8ebda77fed3995f75c63c5f61dc520bfe9c7897
|
59d3c410ce3005c616b354f0d1ad64cf77798573
|
refs/heads/master
| 2022-10-22T21:01:02.063293 | 2018-07-08T04:48:49 | 2018-07-08T04:48:49 | 120,036,602 | 2 | 2 | null | 2022-10-01T12:06:20 | 2018-02-02T22:08:03 |
Python
|
UTF-8
|
Python
| false | false | 231 |
py
|
from cn_stock_holidays.gateway.exchange_calendar_hkex import HKExchangeCalendar
from cn_stock_holidays.gateway.exchange_calendar_shsz import SHSZExchangeCalendar
__all__ = [
'HKExchangeCalendar',
'SHSZExchangeCalendar',
]
|
[
"[email protected]"
] | |
d30bc0c7e55dda9955dd8a25d1ead5b969592d85
|
90e02be4ea2461e4e4a0fd504ce516aadf427c69
|
/old/polysem.py
|
426ea7e7fdecb878ccd048fd5354c90808906cb3
|
[] |
no_license
|
thoppe/polysemous-emoji
|
df5150fce38663389926aff4991c85d3bc442963
|
50b2107b50a3d8ab9719d2736c8925cc68a75180
|
refs/heads/master
| 2021-01-10T02:34:17.728953 | 2017-06-05T20:59:27 | 2017-06-05T20:59:27 | 51,712,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 964 |
py
|
# Learn a sparse dictionary (K-SVD) over word2vec embedding vectors and
# save the dictionary D and sparse codes gamma to HDF5.
# Python 2 only: uses print statements.
from ksvd import KSVD
import h5py, os
from gensim.models.word2vec import Word2Vec

# Load the config files
from configobj import ConfigObj
wcon = ConfigObj("config.ini")["word2vec"]
kcon = ConfigObj("config.ini")["kSVD"]

# (duplicate import kept as-is)
from gensim.models.word2vec import Word2Vec

# Embedding matrix: one row per vocabulary word.
f_features = wcon["f_features"].format(**wcon)
clf = Word2Vec.load(f_features)
X = clf.syn0
print clf
print X.shape

result = KSVD(X,
              dict_size=kcon.as_int("basis_size"),
              target_sparsity=kcon.as_int("sparsity"),
              max_iterations=kcon.as_int("iterations"),
              enable_printing=True,
              enable_threading = True,
              print_interval=1)
D,gamma = result

# Persist both factors, gzip-compressed.
f_model = kcon["f_kSVD"].format(**kcon)
h5 = h5py.File(f_model,'w')
h5.create_dataset("D",data=D, compression="gzip")
h5.create_dataset("gamma",data=gamma, compression="gzip")
# Save the arguments (maybe later?)
#for key in args:
#    g.attrs[key] = cargs[key]
h5.close()
|
[
"[email protected]"
] | |
8d4856c6c849a5bc71aa203dc7a9dd6ec06bbf27
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_042/ch44_2020_10_07_13_08_47_151008.py
|
10a4ee19987b7143968c5c8eef6249ca9ea6b94d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 263 |
py
|
# Print the 1-based number of the month whose (Portuguese) name the user types.
nome_mes = input('Qual o nome do mes?')
nome_meses = ['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']
for numero, nome in enumerate(nome_meses, start=1):
    if nome == nome_mes:
        print(numero)
|
[
"[email protected]"
] | |
0c25dd68f7abaaf858f4ffb5e3109947576dcbbe
|
2e3d63726c1d05b73b9cc22e5bcbead30246a8dc
|
/Facepad/wsgi.py
|
ac7aeeae354627af07c474d0569514805a849d2c
|
[] |
no_license
|
rolycg/tiny_social_network
|
041f6e4ab503bb82eca4cf1efb436d3b5250343a
|
e7ec45d053d291d53bd9d58bbb882b4b3edb6355
|
refs/heads/master
| 2021-01-10T04:27:16.344700 | 2016-03-23T18:19:49 | 2016-03-23T18:19:49 | 54,581,800 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
"""
WSGI config for Facepad project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Facepad.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
689b90a020d0df47a96c87657ee3a6532ccac798
|
e7e5cc4353671d4cb410acf12fb3de92e8f4ac58
|
/machine-learning/tensorflow_test_SNH/SNH_run.py
|
b17e76f1094e00d56c971d60e846ec7c2695910c
|
[] |
no_license
|
zooniverse/hco-experiments
|
f10d128f4258e830098564477c66bfa2a13dc5d7
|
fb9f4e476f2402fd0d66fb770f6d1b5c433dafbf
|
refs/heads/master
| 2021-04-30T16:40:11.182769 | 2017-07-12T17:22:34 | 2017-07-12T17:22:34 | 80,105,222 | 4 | 2 | null | 2021-01-08T09:56:44 | 2017-01-26T10:27:23 |
Python
|
UTF-8
|
Python
| false | false | 4,846 |
py
|
# Train a simple CNN using Keras
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator, array_to_img
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import os
# Parameters
batch_size = 32
num_classes = 2  # real vs bogus
epochs = 15
data_augmentation = False
# Image root; read below via its 'real' and 'bogus' subfolders.
path_snh = "D:/Studium_GD/Zooniverse/Data/SNHuntersInception/images/"
# function to import one image
def import_one_image(path):
# this is a PIL image
img = load_img(path)
# this is a Numpy array with shape (3, x, y)
x = img_to_array(img)
# this is a Numpy array with shape (1, 3, x, y)
x = x.reshape((1,) + x.shape)
return x
# read all images from disk
# Fix: the original grew x_real/x_bogus with np.vstack inside the loop
# (O(n^2) copying) and seeded the first row via a bare `except` that
# would also swallow any unrelated error.  Accumulate in lists and
# stack once instead; resulting arrays and labels are identical.
real_files = []
real_images = []
for f in os.listdir(path_snh + 'real'):
    # get path
    real_files.append(f)
    # get image
    real_images.append(import_one_image(path_snh + 'real/' + f))
x_real = np.vstack(real_images)

# generate labels (1 = real)
y_real = [1 for i in range(0, len(real_files))]

bogus_files = []
bogus_images = []
for f in os.listdir(path_snh + 'bogus'):
    # get path
    bogus_files.append(f)
    # get image
    bogus_images.append(import_one_image(path_snh + 'bogus/' + f))
x_bogus = np.vstack(bogus_images)

# generate labels (0 = bogus)
y_bogus = [0 for i in range(0, len(bogus_files))]
# generate one big data set (reals first, then bogus, labels aligned)
x_data = np.vstack((x_real,x_bogus))
y_data = np.concatenate((y_real,y_bogus))

# generate train and test split using sklearn
# (fixed random_state keeps the split reproducible)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data,
                                                    test_size=0.33,
                                                    random_state=42)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Two conv blocks (32 then 64 filters, each with pooling and dropout)
# followed by a dense softmax classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# NOTE(review): the held-out test split doubles as validation data here,
# so the reported validation metrics are optimistic.
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test))
|
[
"[email protected]"
] | |
67d5a8da55c644e91aa539b2449116477ff95e23
|
ba41dbc2183bd91e6e9a8669904b85f342775530
|
/mgmt/dump-function-calls.py
|
d5620c7a17ded957d08beb4af09694250bb26f38
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
fish2000/libimread
|
5d835f98083a897e1d0d9fde4f816cea4496e35f
|
781e2484559136de5171d577d54afa624ca4c8b4
|
refs/heads/master
| 2022-04-28T18:14:27.189975 | 2022-03-20T23:57:15 | 2022-03-20T23:57:15 | 30,621,253 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,434 |
py
|
#!/usr/bin/env python
# dump_function_calls.py:
# Originally from: https://github.com/smspillaz/scripts/blob/master/list_non_inlined_symbols/dump_function_calls.py
#
# Copyright (c) 2014 Sam Spilsbury <[email protected]>
# Licenced under the MIT Licence.
#
# Looks at the DWARF data for a library and dumps to stdout where
# functions are called
#
# Usage: dump_function_calls.py object [regex]
import re
import sys
import subprocess
def get_function_calls (objdump_output, regex):
    """Extract demangled targets of `callq` instructions from objdump output.

    `regex`, when given, must match at the start of the disassembly line
    for the call to be kept.  Returns a de-duplicated set of names
    demangled through c++filt.
    """
    function_calls = []
    for line in objdump_output.split ("\n"):
        if "callq" in line and "<" in line and ">" in line:
            if regex is None or (regex is not None and regex.match (line) != None):
                mangled = line.split ("<")[1]
                # Strip "@plt"-style suffixes or local-clone dot suffixes.
                # NOTE(review): a plain "<sym>" keeps its trailing '>' here
                # (neither '@' nor '.' present) — verify c++filt tolerates it.
                if "@" in mangled:
                    mangled = mangled.split("@")[0]
                elif "." in mangled:
                    mangled = mangled.split(".")[0]
                # [:-1] drops the trailing newline from c++filt's output.
                call = subprocess.check_output (["c++filt", mangled])[:-1]
                function_calls.append (call)
    return set (function_calls)
if (len (sys.argv) < 2):
print "Usage: dump_function_calls.py object [regex]"
object = sys.argv[1];
regex = None
if (len (sys.argv) == 3):
regex = re.compile (sys.argv[2])
objdump_output = subprocess.check_output (["gobjdump", "-S", object])
function_calls = get_function_calls (objdump_output, regex)
for call in function_calls:
print call
|
[
"[email protected]"
] | |
9a8d1f2c92d9086aa11eccf289f7c95d6b8f29d0
|
b37d4c6ae5fa90c5afc6346088c272d3b7c8a37c
|
/backend/course/api/v1/viewsets.py
|
dafe5676cfef6a5d35ce42d0bb66ce7f52f6daf6
|
[] |
no_license
|
crowdbotics-apps/staeci-27357
|
3f18579fe25b97c64db661205c7398a8f27905e7
|
e63469a44549e38d3d01046fbf75c394ef4ec435
|
refs/heads/master
| 2023-04-28T20:35:58.657031 | 2021-05-24T22:53:33 | 2021-05-24T22:53:33 | 370,503,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,400 |
py
|
from rest_framework import authentication
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
from .serializers import (
RecordingSerializer,
EventSerializer,
SubscriptionSerializer,
CourseSerializer,
GroupSerializer,
ModuleSerializer,
PaymentMethodSerializer,
SubscriptionTypeSerializer,
EnrollmentSerializer,
LessonSerializer,
CategorySerializer,
)
from rest_framework import viewsets
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Course records (session or token auth)."""
    serializer_class = CourseSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Course.objects.all()
class EventViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Event records (session or token auth)."""
    serializer_class = EventSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Event.objects.all()
class GroupViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Group records (session or token auth)."""
    serializer_class = GroupSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Group.objects.all()
class LessonViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Lesson records (session or token auth)."""
    serializer_class = LessonSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Lesson.objects.all()
class SubscriptionViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Subscription records (session or token auth)."""
    serializer_class = SubscriptionSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Subscription.objects.all()
class RecordingViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Recording records (session or token auth)."""
    serializer_class = RecordingSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Recording.objects.all()
class PaymentMethodViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for PaymentMethod records (session or token auth)."""
    serializer_class = PaymentMethodSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = PaymentMethod.objects.all()
class EnrollmentViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Enrollment records (session or token auth)."""
    serializer_class = EnrollmentSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Enrollment.objects.all()
class SubscriptionTypeViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for SubscriptionType records (session or token auth)."""
    serializer_class = SubscriptionTypeSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = SubscriptionType.objects.all()
class ModuleViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Module records (session or token auth)."""
    serializer_class = ModuleSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Module.objects.all()
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoints for Category records (session or token auth)."""
    serializer_class = CategorySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Category.objects.all()
|
[
"[email protected]"
] | |
d07149fc3d7c5b9da5eb276482171723605c7fab
|
0eb1d32c5e2770ad6d6e8f09061e9d5a09f3c37d
|
/Order/migrations/0005_auto_20210211_0723.py
|
e8f4fb7d13903a1b3498f229ffb1fffe4aa16799
|
[] |
no_license
|
HadiGhazali/digikala
|
da4e993a590d17801b4bf3ce3be37f62e0fce5d8
|
4ccbf199b5c848e0e7550eb8ebaaeaa0bc096cd2
|
refs/heads/main
| 2023-03-12T13:00:22.000792 | 2021-03-04T14:38:25 | 2021-03-04T14:38:25 | 322,250,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
# Generated by Django 3.1.4 on 2021-02-11 07:23
from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated: set human-readable verbose names on the payment model."""

    dependencies = [
        ('Order', '0004_auto_20210209_1157'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='payment',
            options={'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'},
        ),
    ]
|
[
"[email protected]"
] | |
cae3368e7e8e0c5c1919e14f58e1fb7b2e2170bf
|
d61a6df81bca12df0b743053643bc386d15a4b56
|
/Exercise05/5-47.py
|
2a7a135a3f62d27047e4a304ce8bbba0597e3f54
|
[
"Apache-2.0"
] |
permissive
|
ywyz/IntroducingToProgrammingUsingPython
|
9108bea2a79f8d24693144dc127aa6b32896121f
|
614d59eacb7e37aece871a00f7d1518f7de88708
|
refs/heads/master
| 2020-04-20T06:29:02.999774 | 2019-12-22T12:52:46 | 2019-12-22T12:52:46 | 168,685,428 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
'''
@Date: 2019-11-09 20:43:16
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-09 20:52:15
'''
import turtle
import random

# Draw a 120 x 100 rectangle: x in [-60, 60], y in [-50, 50].
turtle.penup()
turtle.goto(-60, 50)
turtle.pendown()
turtle.forward(120)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(120)
turtle.right(90)
turtle.forward(100)

# Scatter 10 filled red dots (radius 5) at random points inside it.
for n in range(10):
    x = random.randrange(-60, 60)
    y = random.randrange(-50, 50)
    turtle.penup()
    turtle.goto(x, y)
    turtle.pendown()
    turtle.color("red")
    turtle.begin_fill()
    turtle.circle(5)
    turtle.end_fill()
|
[
"[email protected]"
] | |
ddbd6da647684177e27247cfbc0320dccf56315f
|
551dabfe10ea6778546f380d3ee9f0500f352b0f
|
/1.6 Data Split/1.6.2.1.py
|
755230539a32648d535e948fddd664f504a670e2
|
[] |
no_license
|
mohamedalemam/sklearn
|
c32843b6a04fe4b2a326870ba3658e8c1fc3b424
|
387c21357b165a1bc3593f8303ac5f9a672bd62a
|
refs/heads/main
| 2022-12-29T15:38:06.534505 | 2020-10-17T21:26:59 | 2020-10-17T21:26:59 | 301,839,374 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 747 |
py
|
#Import Libraries
from sklearn.model_selection import KFold
#----------------------------------------------------

#KFold Splitting data: 4 folds, shuffled with a fixed seed.
kf = KFold(n_splits=4, random_state=44, shuffle =True)

#KFold Data
# NOTE(review): X and y are not defined in this snippet — it assumes they
# were created earlier (NameError otherwise).
for train_index, test_index in kf.split(X):
    print('Train Data is : \n', train_index)
    print('Test Data is : \n', test_index)
    print('-------------------------------')
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    print('X_train Shape is ' , X_train.shape)
    print('X_test Shape is ' , X_test.shape)
    print('y_train Shape is ' ,y_train.shape)
    print('y_test Shape is ' , y_test.shape)
    print('========================================')
|
[
"[email protected]"
] | |
1bb76202345c93a96430e6d954e9f8e763c960ab
|
3b00f143f858c495df46cf88a27a18bee388c6b6
|
/Dynamic Programming/edit_distance.py
|
6e0ae8490c463511868684b515372c7cfc18406a
|
[] |
no_license
|
deepcpatel/data_structures_and_algorithms
|
5a4d70e9d74e933a16b132d6701a0dc6b19d9203
|
2c7849f1e31c5c60924eb70830e524b44306d652
|
refs/heads/master
| 2023-01-01T04:47:19.704199 | 2020-10-27T17:35:48 | 2020-10-27T17:35:48 | 307,295,841 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 995 |
py
|
# Link: https://leetcode.com/problems/edit-distance/
# Not my Solution
'''
Explanation:
Define dp(i, j) as the minimum number of operations required to convert word1[i:] to word2[j:].
Then we'll get the recursion below:
dp(i, j) = dp(i + 1, j + 1) if word1[i] == word2[j]
dp(i, j) = 1 + min(dp(i + 1, j + 1), dp(i, j + 1), dp(i + 1, j)) otherwise
'''
from functools import lru_cache


class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the minimum number of insert/delete/replace edits turning word1 into word2."""
        len1, len2 = len(word1), len(word2)

        @lru_cache(None)
        def solve(i: int, j: int) -> int:
            # word1 exhausted: insert every remaining character of word2[j:].
            if i == len1:
                return len2 - j
            # word2 exhausted: delete every remaining character of word1[i:].
            if j == len2:
                return len1 - i
            # Matching heads cost nothing; advance both cursors.
            if word1[i] == word2[j]:
                return solve(i + 1, j + 1)
            # Otherwise pay 1 for the cheapest of replace / insert / delete.
            replaced = solve(i + 1, j + 1)
            inserted = solve(i, j + 1)
            deleted = solve(i + 1, j)
            return 1 + min(replaced, inserted, deleted)

        return solve(0, 0)
|
[
"[email protected]"
] | |
3e1821a70e48e45f7d23a8a598e644fd55aa3267
|
ffeacff13af906bf5e7a02018a2543902f5dc8ef
|
/01-Python核心编程/代码/04-数据序列/02-列表/hm_02_查找.py
|
507751dc98470c528285216532fc445039e51cd8
|
[
"MIT"
] |
permissive
|
alikslee/Python-itheima-2019
|
457080ee83d0f5f7eaba426da0ea86405d2d5248
|
691035d5ff0e362139c7dbe82f730ec0e060fd2e
|
refs/heads/main
| 2023-01-01T16:27:20.062463 | 2020-10-22T16:20:29 | 2020-10-22T16:20:29 | 305,959,901 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
# Demo list for the lookup operations below.
name_list = ['TOM', 'Lily', 'ROSE']
# 1. index() — position of the first match (raises ValueError when absent):
#    name_list.index('TOM')
#    name_list.index('TOMS')
# 2. count() — how many times a value occurs:
#    name_list.count('TOM')
#    name_list.count('TOMS')
# 3. len() — number of elements in the list.
print(len(name_list))
|
[
"[email protected]"
] | |
ceb2448af505fefb48dabe7e0516eaf2d46eb5a4
|
a8769709aeb7299fa3757f0e7bba5c617eb8cfe3
|
/lesson-3/k8s/lib/python2.7/site-packages/kubernetes/client/models/v1beta1_api_service_condition.py
|
d851edd6c77101268757fb7d42c4ef39c4686464
|
[
"Apache-2.0"
] |
permissive
|
simox-83/workshop-k8s
|
2ac5e8b282bb7c3337acc726a7d972717bf649cc
|
04cb18e8b5925a3cfd84ca316952a6cb64960b31
|
refs/heads/master
| 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 |
Apache-2.0
| 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null |
UTF-8
|
Python
| false | false | 6,769 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1APIServiceCondition(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type (used by to_dict/serializers).
    swagger_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }
    # Python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }
    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
        """
        V1beta1APIServiceCondition - a model defined in Swagger
        """
        # Backing fields for the properties defined below.
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None
        # Optional fields are assigned only when provided; `status` and `type`
        # are required and always go through their validating setters (which
        # raise ValueError when passed None).
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type
    @property
    def last_transition_time(self):
        """
        Gets the last_transition_time of this V1beta1APIServiceCondition.
        Last time the condition transitioned from one status to another.
        :return: The last_transition_time of this V1beta1APIServiceCondition.
        :rtype: datetime
        """
        return self._last_transition_time
    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """
        Sets the last_transition_time of this V1beta1APIServiceCondition.
        Last time the condition transitioned from one status to another.
        :param last_transition_time: The last_transition_time of this V1beta1APIServiceCondition.
        :type: datetime
        """
        self._last_transition_time = last_transition_time
    @property
    def message(self):
        """
        Gets the message of this V1beta1APIServiceCondition.
        Human-readable message indicating details about last transition.
        :return: The message of this V1beta1APIServiceCondition.
        :rtype: str
        """
        return self._message
    @message.setter
    def message(self, message):
        """
        Sets the message of this V1beta1APIServiceCondition.
        Human-readable message indicating details about last transition.
        :param message: The message of this V1beta1APIServiceCondition.
        :type: str
        """
        self._message = message
    @property
    def reason(self):
        """
        Gets the reason of this V1beta1APIServiceCondition.
        Unique, one-word, CamelCase reason for the condition's last transition.
        :return: The reason of this V1beta1APIServiceCondition.
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """
        Sets the reason of this V1beta1APIServiceCondition.
        Unique, one-word, CamelCase reason for the condition's last transition.
        :param reason: The reason of this V1beta1APIServiceCondition.
        :type: str
        """
        self._reason = reason
    @property
    def status(self):
        """
        Gets the status of this V1beta1APIServiceCondition.
        Status is the status of the condition. Can be True, False, Unknown.
        :return: The status of this V1beta1APIServiceCondition.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """
        Sets the status of this V1beta1APIServiceCondition.
        Status is the status of the condition. Can be True, False, Unknown.
        :param status: The status of this V1beta1APIServiceCondition.
        :type: str
        """
        # Required field: reject None explicitly.
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")
        self._status = status
    @property
    def type(self):
        """
        Gets the type of this V1beta1APIServiceCondition.
        Type is the type of the condition.
        :return: The type of this V1beta1APIServiceCondition.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """
        Sets the type of this V1beta1APIServiceCondition.
        Type is the type of the condition.
        :param type: The type of this V1beta1APIServiceCondition.
        :type: str
        """
        # Required field: reject None explicitly.
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")
        self._type = type
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1APIServiceCondition):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"[email protected]"
] | |
bf04013c6f0f8ddcd9f3d419707fd2261e896529
|
3fadf661bd3ad1cc530d9197f157b3a4255034b6
|
/romani/migrations/0023_auto_20170107_2116.py
|
df7334c894a9469ccede7ea6d450e969024a0d15
|
[] |
no_license
|
wkov/labiadas
|
69b1c69063edafe4f16467df53f58969c63db5c7
|
bb32de63cbbea951a0df49bc0bf800caaca7fc35
|
refs/heads/master
| 2022-11-28T11:46:55.735429 | 2020-01-11T18:13:37 | 2020-01-11T18:13:37 | 84,640,857 | 0 | 1 | null | 2022-11-22T03:03:08 | 2017-03-11T10:09:42 |
JavaScript
|
UTF-8
|
Python
| false | false | 520 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops `data_fi` from Comanda and adds a
    # nullable `data_fi` DateTimeField to Contracte.
    # NOTE(review): migrations that may already be applied must not be edited
    # behaviorally — operation order is significant.
    dependencies = [
        ('romani', '0022_auto_20170107_2114'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comanda',
            name='data_fi',
        ),
        migrations.AddField(
            model_name='contracte',
            name='data_fi',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
6a9e91b65c2d88c02d3a75ffe8f476040710e5f7
|
a5020f97ac75e480e2ac386daf758df33289b8f9
|
/manage.py
|
af14ecd321e4edfa57d8336d47993a5852b335af
|
[] |
no_license
|
emakarov/ovarenik
|
c7adcbd4b771f3264168b2ae3c2438302a7ff520
|
466e55645da223163bd065939fcbac068f16a043
|
refs/heads/master
| 2020-04-11T09:54:25.310143 | 2014-06-22T22:46:36 | 2014-06-22T22:46:36 | 12,011,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
#!/usr/bin/env python
import os
import sys


def main():
    """Point Django at this project's settings and dispatch the CLI command."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ovarenik.settings")
    # Imported lazily so a missing Django install fails only when actually run.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
78e397b8a58f688d5430a680877377c3a2db36f3
|
73b5d880fa06943c20ff0a9aee9d0c1d1eeebe10
|
/tinyos-1.x/contrib/t-mac/apps/taskthread.py
|
aaf638497ee5b76570d3204810ec2548a90fdc4e
|
[
"Intel"
] |
permissive
|
x3ro/tinyos-legacy
|
101d19f9e639f5a9d59d3edd4ed04b1f53221e63
|
cdc0e7ba1cac505fcace33b974b2e0aca1ccc56a
|
refs/heads/master
| 2021-01-16T19:20:21.744228 | 2015-06-30T20:23:05 | 2015-06-30T20:23:05 | 38,358,728 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 711 |
py
|
import threading
class TaskThread(threading.Thread):
    """Thread that executes a task every N seconds.

    Subclasses override ``task`` with the work to perform; call ``shutdown``
    to stop the loop. The interval defaults to 15 seconds.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        # Event doubles as the stop flag and as an interruptible sleep,
        # so shutdown() takes effect without waiting out a full interval.
        self._finished = threading.Event()
        self._interval = 15.0

    def setInterval(self, interval):
        """Set the number of seconds we sleep between executing our task"""
        self._interval = interval

    def shutdown(self):
        """Stop this thread"""
        # BUG FIX: the original used the Python-2-only statement form
        # `print "shutting down",self`, which is a SyntaxError on Python 3.
        # This %-formatted call prints the same text on both versions.
        print("shutting down %s" % self)
        self._finished.set()

    def run(self):
        # Loop until shutdown() is requested.
        while True:
            if self._finished.is_set():  # is_set() replaces deprecated isSet()
                return
            self.task()
            # sleep for interval or until shutdown
            self._finished.wait(self._interval)

    def task(self):
        """The task done by this thread - override in subclasses"""
        pass
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.