blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
511fe8b79650e5129209a33e6c7d768af423c6e6
|
2a1f4c4900693c093b2fcf4f84efa60650ef1424
|
/py/dome/backend/apps.py
|
fc8e9e1db58cfc9dbc955eb7df36461f862fe2b5
|
[
"BSD-3-Clause"
] |
permissive
|
bridder/factory
|
b925f494303728fa95017d1ba3ff40ac5cf6a2fd
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
refs/heads/master
| 2023-08-10T18:51:08.988858 | 2021-09-21T03:25:28 | 2021-09-21T03:25:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 294 |
py
|
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import unicode_literals
from django.apps import AppConfig
class BackendConfig(AppConfig):
    """Django AppConfig for the Dome backend app; registers it under 'backend'."""
    name = 'backend'
|
[
"[email protected]"
] | |
bfa4051b7daa99e35be4c69d94d185b37ba84f1b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_035/ch37_2020_03_25_14_04_04_120072.py
|
a165e2f3c23563f7b30d6684819d8aca366bc2cd
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 174 |
py
|
# Keep asking for the password until the user gives up by typing "desisto".
#
# BUG FIX: the original had `return resposta` at module level (a SyntaxError)
# and only prompted once before the loop, so a wrong first answer spun the
# `while` forever. The prompt is now re-issued on every iteration and the
# invalid `return` is gone. The success message after the loop is kept as-is
# (it prints once "desisto" is typed, matching the original's reachable path).
resposta = input("Qual é a senha")
while resposta != "desisto":
    resposta = input("Qual é a senha")
print("Você acertou a senha!")
|
[
"[email protected]"
] | |
e56f0bd33da3d74267fd6ab2971ead15aa9263b8
|
1c488f486d14c19e19af1a46474af224498be193
|
/experimental/serengeti/blankIBCC.py
|
649a35a733279dc7605d90eb8296b4e245101794
|
[
"Apache-2.0"
] |
permissive
|
JiaminXuan/aggregation
|
fc2117494372428adeed85a9a413e2ff47244664
|
9a7ecbc2d4b143a73e48b1826b3727b6976fa770
|
refs/heads/master
| 2020-12-11T01:49:42.977664 | 2015-05-22T16:21:15 | 2015-05-22T16:21:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,087 |
py
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
# Connect to the local MongoDB holding the 2014-07-28 Serengeti dump.
client = pymongo.MongoClient()
db = client['serengeti_2014-07-28']
collection = db["serengeti_classifications"]
collection2 = db["serengeti_subjects"]

# Global state shared by update()/analyze() and the main loop below.
subjects = []            # zooniverse_ids, index = IBCC subject index
users = []               # user names, index = IBCC user index
classifications = []     # pending (user_index, subject_index, blank) rows
class_count = {}         # zooniverse_id -> total classification count
blank_count = {}         # zooniverse_id -> count of "blank" classifications
retiredBlanks = {}       # zooniverse_id -> class count at retirement

# Write the pyIBCC configuration module (Python source consumed by
# ibcc.runIbcc): two classes (blank / not blank), file locations, and priors.
with open("/home/greg/Databases/serengeti_ibcc.py","wb") as f:
    f.write("import numpy as np\n")
    f.write("scores = np.array([0,1])\n")
    f.write("nScores = len(scores)\n")
    f.write("nClasses = 2\n")
    f.write("inputFile = \"/home/greg/Databases/serengeti_ibcc.csv\"\n")
    f.write("outputFile = \"/home/greg/Databases/serengeti_ibcc.out\"\n")
    f.write("confMatFile = \"/home/greg/Databases/serengeti_ibcc.mat\"\n")
    f.write("nu0 = np.array([30,70])\n")
    f.write("alpha0 = np.array([[3, 1], [1,3]])\n")

# Start the IBCC input CSV with a header row; update() appends data rows.
with open("/home/greg/Databases/serengeti_ibcc.csv","wb") as f:
    f.write("a,b,c\n")

import datetime
def update(individual_classifications):
    """Append new classification rows to the IBCC input CSV and re-run IBCC.

    individual_classifications: iterable of (user_index, subject_index, blank)
    tuples, appended to the CSV that pyIBCC reads.
    """
    #start by removing all temp files
    # (stale output / confusion-matrix / cache files from a previous run)
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.out")
    except OSError:
        pass
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.mat")
    except OSError:
        pass
    try:
        os.remove("/home/greg/Databases/serengeti_ibcc.csv.dat")
    except OSError:
        pass
    # Append the new rows to the shared CSV input file.
    with open("/home/greg/Databases/serengeti_ibcc.csv","a") as f:
        for u, s, b in individual_classifications:
            f.write(str(u)+","+str(s)+","+str(b)+"\n")
    # Timestamps bracket the IBCC run so its duration shows in the console.
    print datetime.datetime.time(datetime.datetime.now())
    ibcc.runIbcc("/home/greg/Databases/serengeti_ibcc.py")
    print datetime.datetime.time(datetime.datetime.now())
def analyze():
    """Scan IBCC output and retire subjects that are confidently blank.

    A subject is added to the global retiredBlanks when IBCC's posterior
    probability of class 1 ("blank") is >= 0.995 and the subject has at
    least 2 classifications.
    """
    with open("/home/greg/Databases/serengeti_ibcc.out","rb") as f:
        reader = csv.reader(f,delimiter=" ")
        for subject_index,p0,p1 in reader:
            # subject_index is written as a float string (e.g. "3.0")
            subject_index = int(float(subject_index))
            subject_id = subjects[subject_index]
            c = class_count[subject_id]
            if (float(p1) >= 0.995) and (c>= 2):
                if not(subject_id in retiredBlanks):
                    retiredBlanks[subject_id] = c
                    #print str(c) + " :: " + str(p1)
# Main pass over the classification collection (tutorial entries excluded):
# index users/subjects, tally blank votes, and every 10k classifications
# feed the batch to IBCC and retire confident blanks.
i = 0
unknownUsers = []
for r in collection.find({"tutorial": {"$ne": True}}):
    try:
        user_name = r["user_name"]
    except KeyError:
        # anonymous classification: remember the IP and skip it
        unknownUsers.append(r["user_ip"])
        continue
    zooniverse_id = r["subjects"][0]["zooniverse_id"]
    if zooniverse_id in retiredBlanks:
        continue
    # Every 10000 classifications, run IBCC on the batch and analyze output.
    if ((i%10000) == 0) and (i > 0):
        print i
        update(classifications)
        classifications = []
        analyze()
    if not(user_name in users):
        users.append(user_name)
    if not(zooniverse_id in subjects):
        subjects.append(zooniverse_id)
        class_count[zooniverse_id] = 0
        blank_count[zooniverse_id] = 0
    i += 1
    # NOTE(review): list.index() here is O(n) per classification — fine for a
    # one-off script, but a dict lookup would scale better.
    user_index = users.index(user_name)
    subject_index = subjects.index(zooniverse_id)
    class_count[zooniverse_id] += 1
    a = r["annotations"]
    # The last annotation carries "nothing" when the volunteer marked blank.
    if not("nothing" in a[-1]):
        assert('species' in a[0])
        blank = 0
    else:
        blank = 1
        blank_count[zooniverse_id] += 1
    classifications.append((user_index,subject_index,blank))
    # cap the run at 300k classifications
    if i >= 300000:
        break
#print len(unknownUsers)
#print len(list(set(unknownUsers)))

# Compare IBCC's retirements against the site's own retire_reason:
# tBlank = agreed blank, fBlank = IBCC said blank but the site disagreed.
tBlank = 0
fBlank = 0
speciesList = ['blank','elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
errors = {s.lower():0 for s in speciesList}
for zooniverse_id in retiredBlanks:
    r = collection2.find_one({"zooniverse_id" : zooniverse_id})
    retire_reason = r["metadata"]["retire_reason"]
    if retire_reason in ["blank", "blank_consensus"]:
        tBlank += 1
    else:
        fBlank += 1
        print zooniverse_id + " :: " + str(r["location"]["standard"][0])
        # the species the site's own counters favour for this subject
        f = max(r["metadata"]["counters"].items(), key = lambda x:x[1])
        print f
        try:
            errors[f[0].lower()] += 1
            print str(blank_count[zooniverse_id]) + "/" + str(class_count[zooniverse_id])
        except KeyError:
            # counter key is not one of the known species labels
            print "---***"
        #print str(r["metadata"]["counters"].values())
print "==---"
print tBlank
print fBlank
print np.mean(retiredBlanks.values())
print np.median(retiredBlanks.values())
print "===---"
# per-species breakdown of the disagreements
for s in speciesList:
    if errors[s.lower()] != 0:
        print s + " - " + str(errors[s.lower()])
|
[
"[email protected]"
] | |
52b11a09076f3904dc2f45e1e998edf62a885d87
|
aae0432eede626a0ac39ff6d81234e82f8d678c2
|
/leetcode/algorithm/4.median-of-two-sorted-arrays.py
|
63670a63bf49ee10613895df33ff3b9ae3388fc8
|
[] |
no_license
|
KIDJourney/algorithm
|
81c00186a6dfdc278df513d25fad75c78eb1bf68
|
e1cf8e12050b9f1419a734ff93f9c626fc10bfe0
|
refs/heads/master
| 2022-11-24T09:30:16.692316 | 2022-11-06T09:33:51 | 2022-11-06T09:33:51 | 40,428,125 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,035 |
py
|
#
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# @lc code=start
class Solution:
    """LeetCode 4: median of two sorted arrays via a plain linear merge."""

    def findMedianSortedArrays(self, nums1, nums2) -> float:
        """Return the median of the two sorted input lists."""
        return self.easy(nums1, nums2)

    def easy(self, nums1, nums2):
        """Merge both sorted lists (O(m+n)), then read the median off the middle."""
        merged = []
        i = j = 0
        n1, n2 = len(nums1), len(nums2)
        # Standard two-pointer merge; ties take from nums1 first.
        while i < n1 and j < n2:
            if nums1[i] <= nums2[j]:
                merged.append(nums1[i])
                i += 1
            else:
                merged.append(nums2[j])
                j += 1
        # One list is exhausted; append the remaining tail of the other.
        merged.extend(nums1[i:])
        merged.extend(nums2[j:])
        half = len(merged) // 2
        if len(merged) % 2 == 0:
            return (merged[half] + merged[half - 1]) / 2.0
        return merged[half]
# @lc code=end
|
[
"[email protected]"
] | |
3fd8971af0057cfe6f9120d8654640df8c971099
|
99e76e9e4c8031418c4c50217b48adf1d880cf2f
|
/setup.py
|
6974fdc5b21fd1b544eac798d4363569ad4198d7
|
[
"MIT"
] |
permissive
|
grow/grow-ext-responsive-styles
|
d75a5abb070613641e3da9f3f4cf7dc07e88c51f
|
bb3d8f68edc1f3e1bdf508bb5df8d5b296574e9b
|
refs/heads/master
| 2021-01-03T14:04:15.882718 | 2020-05-20T20:38:09 | 2020-05-20T20:38:09 | 240,096,948 | 0 | 0 |
MIT
| 2020-05-20T20:34:58 | 2020-02-12T19:27:42 |
HTML
|
UTF-8
|
Python
| false | false | 349 |
py
|
# Packaging metadata for the grow-ext-responsive-styles Grow extension.
from setuptools import setup

setup(
    name='grow-ext-responsive-styles',
    version='1.0.0',
    zip_safe=False,  # package data (HTML templates) must be readable from disk
    license='MIT',
    author='Grow Authors',
    author_email='[email protected]',
    include_package_data=True,
    packages=[
        'responsive_styles',
    ],
    # Ship the extension's HTML templates alongside the Python package.
    package_data={
        'responsive_styles': ['*.html'],
    },
)
|
[
"[email protected]"
] | |
4f21bdabf36e65773d6c9289dad471ce6aa16e31
|
178ae62be7de20a50f96361e80bdcff5a5493ae2
|
/koica/templatetags/koica.py
|
36b3a706fcb6f684e4f9896f13b5cc8b25353d75
|
[
"MIT"
] |
permissive
|
synw/django-koica
|
a043800c15fad69f2024557e62fcf0ac4808ffae
|
d8b1c9fa70c428f0aa0db0c523524e9d2ef27377
|
refs/heads/master
| 2021-01-10T03:15:24.570691 | 2015-12-09T14:55:29 | 2015-12-09T14:55:29 | 46,188,691 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
from django import template
from koica.utils import sanitize_html
register = template.Library()
@register.filter(is_safe=True)
def remove_pre(value):
    """Template filter: sanitize *value*'s HTML with <pre> blocks removed.

    Delegates to koica.utils.sanitize_html with remove_pre=True; is_safe=True
    tells Django not to re-escape the sanitized output.
    """
    return sanitize_html(value, remove_pre=True)
|
[
"[email protected]"
] | |
8e86bc7463a15ee8ba540cebbdc6dbebe01e0474
|
461d7bf019b9c7a90d15b3de05891291539933c9
|
/bip_utils/bip39/bip39_entropy_generator.py
|
47c75cf8f3c76ff3b2cb1f678605ec4780e1d6e9
|
[
"MIT"
] |
permissive
|
renauddahou/bip_utils
|
5c21503c82644b57ddf56735841a21b6306a95fc
|
b04f9ef493a5b57983412c0ce460a9ca05ee1f50
|
refs/heads/master
| 2023-07-16T05:08:45.042084 | 2021-08-19T09:33:03 | 2021-08-19T09:33:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,419 |
py
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import os
from enum import IntEnum, unique
from typing import List, Union
@unique
class Bip39EntropyBitLen(IntEnum):
    """ Enumerative for BIP-0039 entropy bit lengths. """

    # FIX: the original members ended with trailing commas (e.g. "= 128,"),
    # which makes each assigned value a 1-tuple. IntEnum happens to unpack a
    # tuple value through int(), so behavior was the same, but plain ints are
    # the intended, unambiguous form.
    BIT_LEN_128 = 128
    BIT_LEN_160 = 160
    BIT_LEN_192 = 192
    BIT_LEN_224 = 224
    BIT_LEN_256 = 256
class Bip39EntropyGeneratorConst:
    """ Class container for BIP39 entropy generator constants. """

    # Accepted entropy lengths in bit: every Bip39EntropyBitLen member, in
    # declaration order (equivalent to listing them out one by one).
    ENTROPY_BIT_LEN: List[Bip39EntropyBitLen] = list(Bip39EntropyBitLen)
class Bip39EntropyGenerator:
    """ Entropy generator class. It generates random entropy bytes with the specified length. """

    @staticmethod
    def IsValidEntropyBitLen(bits_len: Union[int, Bip39EntropyBitLen]) -> bool:
        """ Get if the specified entropy bit length is valid.

        Args:
            bits_len (int or Bip39EntropyBitLen): Entropy length in bits

        Returns:
            bool: True if valid, false otherwise
        """
        return bits_len in Bip39EntropyGeneratorConst.ENTROPY_BIT_LEN

    @staticmethod
    def IsValidEntropyByteLen(bytes_len: int) -> bool:
        """ Get if the specified entropy byte length is valid.

        Args:
            bytes_len (int): Entropy length in bytes

        Returns:
            bool: True if valid, false otherwise
        """
        return Bip39EntropyGenerator.IsValidEntropyBitLen(bytes_len * 8)

    def __init__(self,
                 bits_len: Union[int, Bip39EntropyBitLen]) -> None:
        """ Construct class by specifying the bits length.

        Args:
            bits_len (int or Bip39EntropyBitLen): Entropy length in bits

        Raises:
            ValueError: If the bit length is not valid
        """
        if not self.IsValidEntropyBitLen(bits_len):
            raise ValueError("Entropy bit length is not valid (%d)" % bits_len)
        self.m_bits_len = bits_len

    def Generate(self) -> bytes:
        """ Generate random entropy bytes with the length specified during construction.

        Returns:
            bytes: Generated entropy bytes
        """
        # 8 bits per byte; all valid bit lengths are multiples of 8.
        return os.urandom(self.m_bits_len // 8)
|
[
"[email protected]"
] | |
70b411ba66521bde662ff464e6ab782442fa0581
|
1508f7da93705839660e4fdfb87df7a9664bf087
|
/a10API/a10API/flask/bin/migrate
|
bff34539b04e8d820b8b866d8ef3ee3bbc9995fb
|
[] |
no_license
|
Younglu125/A10_Networks
|
1a1ecebb28dd225f6a1f901a7c28350300df356d
|
78a177ae4c8638d58dc873e4b1c589a1d5aaa717
|
refs/heads/master
| 2020-06-17T00:35:30.325740 | 2016-03-21T18:17:30 | 2016-03-21T18:17:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
#!/home/echou/a10API/flask/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'sqlalchemy-migrate==0.7.2','console_scripts','migrate'
# Auto-generated setuptools wrapper: resolves the pinned sqlalchemy-migrate
# distribution's `migrate` console-script entry point and runs it, exiting
# with whatever status it returns.
__requires__ = 'sqlalchemy-migrate==0.7.2'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('sqlalchemy-migrate==0.7.2', 'console_scripts', 'migrate')()
    )
|
[
"[email protected]"
] | ||
3ad99e3d7e9841da8f65b2003210f661dc96df4a
|
0296bc69a0d9608ed826ad7a719395f019df098f
|
/Tools/Compare_images.py
|
f4ba586d2dfa3fcae52e277676f2b4a82ffdf59a
|
[] |
no_license
|
jcn16/Blender_HDRmap_render
|
c0486a77e04c5b41a6f75f123dbdb3d10c682367
|
50e6cdb79fef83081de9830e7105dd425a235a9e
|
refs/heads/main
| 2023-07-19T22:22:53.622052 | 2021-08-20T06:29:10 | 2021-08-20T06:29:10 | 377,757,283 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,771 |
py
|
import cv2
import numpy as np
def tianchong(img):
    """Pad *img* ("tianchong" = fill/pad) to a square with black borders,
    then resize to 512x512.

    The shorter axis gets ceil(|h - w| / 2) pixels of black padding on each
    side before the resize.
    """
    height, width = img.shape[0], img.shape[1]
    pad = int(np.ceil(abs(height - width) / 2))
    if height > width:
        top, bottom, left, right = 0, 0, pad, pad
    else:
        top, bottom, left, right = pad, pad, 0, 0
    squared = cv2.copyMakeBorder(img, top, bottom, left, right,
                                 cv2.BORDER_CONSTANT, value=(0, 0, 0))
    return cv2.resize(squared, (512, 512))
def compare():
    """Visually diff a ray-traced render against a shading image.

    Loads hard-coded test images, squares and masks them, and shows the two
    images plus their clipped difference in OpenCV windows (blocks on a key).
    """
    image_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/raytracing.png')
    mask_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/alpha.png')
    image_1=tianchong(image_1)
    mask_1=tianchong(mask_1)
    # NOTE(review): image_2 is not passed through tianchong like the others —
    # confirm the shading image is already 512x512, otherwise shapes mismatch.
    image_2=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/shading.png')
    # Normalize to [0, 1] and apply the alpha mask to both images.
    image_1=image_1/255.0*mask_1/255.0
    image_2=image_2/255.0*mask_1/255.0
    cv2.imshow('image_1',np.asarray(image_1*255,dtype=np.uint8))
    cv2.imshow('image_2',np.asarray(image_2*255,dtype=np.uint8))
    # Difference image, clipped back into displayable byte range.
    res=np.asarray(np.clip((image_1-image_2)*255,0,255),dtype=np.uint8)
    cv2.imshow('res',res)
    cv2.waitKey(0)
def composite():
    """Recomposite a relit image as albedo * shading * mask and display it."""
    shading=cv2.imread('/media/jcn/新加卷/JCN/RelightHDR/TEST/images_high_res/10/raytracing.png')
    albedo=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/p_albedo.png')
    mask=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/gt_mask.png')
    # Normalize each factor to [0, 1], multiply, then convert back to bytes.
    relight=albedo/255.0*shading/255.0*mask/255.0
    relight=np.asarray(relight*255,dtype=np.uint8)
    cv2.imshow('relight',relight)
    cv2.waitKey(0)

if __name__=='__main__':
    compare()
|
[
"[email protected]"
] | |
f7675475bf4180ae4b05a6af1aebe4521077a136
|
e131e752d826ae698e12e7bc0583362741f9d942
|
/AWS.py
|
c886890f56cf208b48066e6c151d54611fc0b574
|
[] |
no_license
|
abalberchak/TouchFace
|
ba30565be91b848126524aa47377789253370e04
|
d093ece8890b68c72e0855a024d908105df99b94
|
refs/heads/master
| 2021-01-11T01:43:35.067808 | 2016-09-29T03:41:13 | 2016-09-29T03:41:13 | 69,530,129 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,318 |
py
|
#----------------------------------------- Intent Schema Below:------------------------------
{
"intents": [
{
"intent": "AMAZON.ResumeIntent"
},
{
"intent": "AMAZON.PauseIntent"
},
{
"intent": "DojoInfoIntent"
},
{
"intent": "AMAZON.HelpIntent"
},
{
"intent": "AMAZON.StopIntent"
},
{
"intent": "TextBrendenIntent"
},
{
"intent": "GetTouchFaceIntent"
},
{
"intent": "DojoBrendenIntent"
},
{
"intent": "AskBrendan"
},
{
"intent": "twilioIntent"
},
{
"intent": "GroupTextIntent",
"slots": [
{
"name": "Name",
"type": "MEMBERS"
}
]
}
]
}
#----------------------------------------- Utterances Below:------------------------------
DojoInfoIntent what is the coding dojo
DojoInfoIntent tell me about the coding dojo
TextBrendenIntent Text Brendan
GetTouchFaceIntent Tell what does Brenden say
DojoBrendenIntent who is brenden
AskBrendan what is touchface
twilioIntent hi annet
GroupTextIntent text {Name}
|
[
"[email protected]"
] | |
af935ba661ffbdb6c3921e41c3c65c2ba9235ccd
|
843d9f17acea5cfdcc5882cf8b46da82160c251c
|
/adafruit_stepper.py
|
8e9319c17ea13b32312acbe50d018791ab2ea40a
|
[] |
no_license
|
gunny26/raspberry
|
7c1da63785c86412af9fa467ea231b19a97f4384
|
e4eb0d2f537b319d41b6c50b59e69fb297c62d25
|
refs/heads/master
| 2016-09-06T14:02:30.122102 | 2014-01-29T16:31:08 | 2014-01-29T16:31:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,335 |
py
|
import RPi.GPIO as GPIO
import time

# NOTE(review): calling GPIO.cleanup() before setmode/setup triggers a
# "no channels set up" warning in RPi.GPIO — confirm it is intentional here.
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)

# BCM pin assignments: driver enable plus the two coil pairs (A1/A2, B1/B2).
enable_pin = 18
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24

GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
# Enable the driver outputs.
GPIO.output(enable_pin, 1)
def forward(delay, steps):
    """Drive the motor *steps* full cycles forward.

    Each cycle energizes the coils through the four-phase sequence below,
    sleeping *delay* seconds after every phase.
    """
    sequence = (
        (1, 0, 1, 0),
        (0, 1, 1, 0),
        (0, 1, 0, 1),
        (1, 0, 0, 1),
    )
    for _ in range(steps):
        for phase in sequence:
            setStep(*phase)
            time.sleep(delay)
def backwards(delay, steps):
    """Drive the motor *steps* full cycles backwards.

    Same four-phase pattern as forward(), applied in reverse order, with a
    *delay*-second pause after each phase.
    """
    sequence = (
        (1, 0, 0, 1),
        (0, 1, 0, 1),
        (0, 1, 1, 0),
        (1, 0, 1, 0),
    )
    for _ in range(steps):
        for phase in sequence:
            setStep(*phase)
            time.sleep(delay)
def setStep(w1, w2, w3, w4):
    """Energize the four stepper coils; w1..w4 are 0/1 output levels for
    coils A1, A2, B1 and B2 respectively."""
    GPIO.output(coil_A_1_pin, w1)
    GPIO.output(coil_A_2_pin, w2)
    GPIO.output(coil_B_1_pin, w3)
    GPIO.output(coil_B_2_pin, w4)
# Interactive driver loop (Python 2: raw_input): prompt for a delay and step
# counts, then run the motor forward and backwards, repeating until Ctrl-C.
while True:
    try:
        delay = raw_input("Delay between steps (milliseconds)?")
        steps = raw_input("How many steps forward? ")
        forward(int(delay) / 1000.0, int(steps))
        steps = raw_input("How many steps backwards? ")
        backwards(int(delay) / 1000.0, int(steps))
    except KeyboardInterrupt:
        # Release GPIO pins on Ctrl-C.
        # NOTE(review): the loop continues after cleanup, so the next GPIO
        # call will fail — confirm whether a `break` was intended here.
        GPIO.cleanup()
|
[
"[email protected]"
] | |
a7072cf5db1b5527272336c6191bab4e1770b928
|
c840f190b3540bf212de2c70563e57da278fa9cb
|
/hyacinth.py
|
055e735da50162825883a5c29dfd69fcd0f7242d
|
[] |
no_license
|
edelooff/hyacinth
|
b768a871d476dd120f7d2d1acb039a6a9ebf2e19
|
0a6dd15fa1b1357afa566f924ad27b744582464b
|
refs/heads/master
| 2022-04-16T13:24:18.986246 | 2020-04-01T08:15:36 | 2020-04-01T08:15:36 | 251,756,604 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,538 |
py
|
from collections import (
Counter,
defaultdict)
import random
import re
import sys
DESIGN = re.compile(r'''
(?P<design>[A-Z])
(?P<size>[SL])
(?P<flowers>(:?\d+[a-z])*) # The specification is fuzzy on 1+ or 0+
(?P<total>\d+)''', re.VERBOSE)
DESIGN_FLOWER = re.compile(r'''
(?P<count>\d+)
(?P<species>[a-z])''', re.VERBOSE)
class Pool:
    """Shared pool of flowers of one size plus the designers drawing from it."""

    def __init__(self):
        # species required by at least one registered designer
        self.common_species = set()
        # registered BouquetDesigner instances for this flower size
        self.designers = []
        # available flowers in the pool, keyed by species
        self.flowers = Counter()

    def add_designer(self, designer):
        """Adds a BouquetDesigner for the pool size.

        It also updates the set of known required species, allowing better
        picking of 'filler' flowers for requested bouquets.
        """
        self.designers.append(designer)
        self.common_species |= designer.required_flowers.keys()

    def add_flower(self, species):
        """Adds a flower of given species to the pool of available flowers."""
        self.flowers[species] += 1
        # Offer the new flower to every designer; the first one that reports
        # it can now complete a bouquet gets to create (and print) it.
        for designer in self.designers:
            if designer.add(species):
                print(self.create_bouquet(designer))

    def create_bouquet(self, designer):
        """Creates a bouquet according to the given designers design.

        After creating the bouquet, other designers are informed of the
        removal of flower species from the shared pool.
        """
        bouquet = designer.create(self.flowers, self.common_species)
        bouquet_string = designer.stringify_bouquet(bouquet)
        # Broadcast each removed (species, quantity) bundle to all designers
        # so their availability caches stay in sync with the pool.
        for bundle in bouquet.items():
            for designer in self.designers:
                designer.remove(*bundle)
        return bouquet_string
class BouquetDesigner:
    """Tracks flower availability for one bouquet design and assembles bouquets."""

    def __init__(self, design, flower_size, required_flowers, bouquet_size):
        self.design = design                      # design letter, e.g. 'A'
        self.flower_size = flower_size            # flower size, 'S' or 'L'
        self.bouquet_size = bouquet_size          # total flowers per bouquet
        self.required_flowers = required_flowers  # species -> required count
        # flowers beyond the required ones needed to reach bouquet_size
        self.filler_quantity = bouquet_size - sum(required_flowers.values())
        self.available_filler = 0
        self.available_flowers = Counter()

    def add(self, species):
        """Adds a species of flower to the local availability cache.

        In addition. this will check whether a bouquet can be created based on
        the recently seen flowers. If one can be created, this returns True.
        """
        if species in self.required_flowers:
            self.available_flowers[species] += 1
        else:
            self.available_filler += 1
        return self.can_create()

    def can_create(self):
        """Checks whether there are enough flowers to create a bouquet.

        This will check if there is enough quantity of the required flowers and
        if so, will check if there is enough filler to create a full bouquet.
        """
        for flower, quantity in self.required_flowers.items():
            if self.available_flowers[flower] < quantity:
                return False
        # total = required species on hand + non-required filler flowers
        available = sum(self.available_flowers.values(), self.available_filler)
        if available >= self.bouquet_size:
            return True
        return False

    def create(self, pool, common_species):
        """Returns a bouquet (species listing) assembled from the given pool.

        After picking the required flowers, if additional flowers are needed
        as filler, this method selects a sample of flowers from the rest of
        the pool in two steps:

        1. Species of flowers used by other BouquetDesigners are avoided so
           that selection for this bouquet causes the least conflict.
        2. A random sample of flowers is picked, to avoid consistently stealing
           from the same other designers. Randomly selecting also hopefully
           generates nice and pleasing outcomes for the recipient, though this
           hypothesis has not been tested in the least ;-)

        In all cases we bias to picking filler flowers that we have a surplus
        of. In an ideal world we would have a function that determines the
        correct bias to introduce here.
        """
        bouquet = Counter()
        for species, quantity in self.required_flowers.items():
            pool[species] -= quantity
            bouquet[species] += quantity
        # Pick the remaining flowers
        if self.filler_quantity:
            remaining = self.filler_quantity
            # First pass excludes species other designers need; second pass
            # (empty exclusion set) may take anything still available.
            for do_not_pick in (common_species, set()):
                population = []
                # symmetric difference: pool species not in the exclusion set
                for species in pool.keys() ^ do_not_pick:
                    population.extend([species] * pool[species])
                sample_size = min(len(population), remaining)
                for species in random.sample(population, sample_size):
                    pool[species] -= 1
                    bouquet[species] += 1
                remaining -= sample_size
                if not remaining:
                    break
        return bouquet

    def remove(self, species, quantity):
        """Proceses removal of flowers from the flower pool.

        This will update either the cache for available required flowers, or
        if it's a species not -required- for this design, the filler count.
        """
        if species in self.required_flowers:
            self.available_flowers[species] -= quantity
        else:
            self.available_filler -= quantity

    def stringify_bouquet(self, bouquet):
        """Returns the formatted bouquet string for this designer."""
        # species sorted alphabetically, e.g. "AL2a3b" for design A, size L
        flowers = sorted(bouquet.items())
        flowerstring = (f'{count}{species}' for species, count in flowers)
        return f'{self.design}{self.flower_size}{"".join(flowerstring)}'

    @classmethod
    def from_specification(cls, design):
        """Creates a BouquetDesigner instance from a string specification."""
        spec = DESIGN.match(design).groupdict()
        spec_flowers = DESIGN_FLOWER.findall(spec['flowers'])
        flowers = {species: int(count) for count, species in spec_flowers}
        return cls(spec['design'], spec['size'], flowers, int(spec['total']))
def read_until_empty(fp):
    """Yields lines from the given filepointer until an empty line is hit."""
    # Explicit read-before-test loop (equivalent to the walrus idiom):
    # stop on the first line that strips to the empty string, or at EOF.
    line = fp.readline().strip()
    while line:
        yield line
        line = fp.readline().strip()
def main():
    """Read bouquet designs, then a stream of flowers, from stdin."""
    pools = defaultdict(Pool)
    # First input block: one design specification per line, routed to the
    # pool for that design's flower size.
    for design in read_until_empty(sys.stdin):
        designer = BouquetDesigner.from_specification(design)
        pools[designer.flower_size].add_designer(designer)
    # Second block: flowers as two-character lines; unpacking the string
    # yields (species_letter, size_letter).
    for species, size in read_until_empty(sys.stdin):
        pools[size].add_flower(species)

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
db365ccaef28c337a5d9c69e8c10f082020063ee
|
c940bcb25e1ed315263b25cbdac49cc4bf92cac1
|
/env/vkviewer/python/georef/georeferenceutils.py
|
92de981594a95d6365cfb3fdb3f7e7f015ad83b1
|
[] |
no_license
|
kwaltr/vkviewer
|
281a3f1b5b08a18a89f232ecd096cea44faca58b
|
01d64df0a9266c65e0c3fb223e073ef384281bdc
|
refs/heads/master
| 2021-01-16T22:09:41.821531 | 2014-02-07T17:19:04 | 2014-02-07T17:19:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,844 |
py
|
'''
Created on Oct 15, 2013
@author: mendt
'''
import subprocess
""" function: parseYSize
@param - imageFile {String} - path to a image file
@return - {Integer} - value which represents the y size of the file

This function parse the x,y size of a given image file """
def parseXYSize(imageFile):
    # run gdalinfo command on imageFile and catch the response via Popen
    # NOTE(review): shell=True with an interpolated path is a command-injection
    # risk if imageFile can be influenced by users — confirm callers only pass
    # trusted server-side paths.
    response = subprocess.Popen("gdalinfo %s"%imageFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # read the console output line by line
    for line in response.stdout:
        # gdalinfo prints a line like "Size is 1234, 5678"
        if 'Size is ' in line:
            x,y = line[8:].split(', ')
            #print "X: %s, Y: %s"%(x,y)
            return float(x),float(y)
""" Functions for getting the gcps. """
def getGCPsAsString(unorderedPixels, verzeichnispfad, georefCoords):
    """Format the ground control points as 'px py, gx gy' strings."""
    pure_gcps = getGCPs(unorderedPixels, verzeichnispfad, georefCoords)
    str_gcps = []
    # each entry is ((pixel_x, pixel_y), (georef_x, georef_y));
    # NOTE: the loop variable shadows the builtin `tuple`.
    for tuple in pure_gcps:
        string = " ".join(str(i) for i in tuple[0])+", "+" ".join(str(i) for i in tuple[1])
        str_gcps.append(string)
    return str_gcps
def getGCPs(unorderedPixels, verzeichnispfad, georefCoords):
    """Pair ordered pixel coordinates with their georeferenced coordinates.

    unorderedPixels: four (x, y) pixel tuples from the client, in any order
    verzeichnispfad: path to the image file (used to read its pixel height)
    georefCoords: four (x, y) georeferenced tuples in llc, ulc, urc, lrc order
    Returns a list of ((pixel_x, pixel_y), (georef_x, georef_y)) pairs.
    """
    # transformed the pixel coordinates to the georef coordinates by recalculating the y values,
    # because of a different coordinate origin
    transformedUnorderedPixels = []
    xSize, ySize = parseXYSize(verzeichnispfad)
    # NOTE: the loop variable shadows the builtin `tuple`.
    for tuple in unorderedPixels:
        transformedUnorderedPixels.append((tuple[0],ySize-tuple[1]))
    # now order the pixel coords so that there sorting represents the order llc, ulc, urc, lrc
    transformedOrderedPixels = orderPixels(transformedUnorderedPixels)
    # now create the gcp list
    try:
        gcpPoints = []
        for i in range(0,len(transformedOrderedPixels)):
            pixelPoints = (transformedOrderedPixels[i][0],transformedOrderedPixels[i][1])
            georefPoints = (georefCoords[i][0],georefCoords[i][1])
            gcpPoints.append((pixelPoints,georefPoints))
        return gcpPoints
    except:
        raise
def orderPixels(unorderdPixels):
    """
    Function brings a list of tuples which are representing the clipping parameter from the client
    in the order llc ulc urc lrc and gives them back at a list. Only valide for pixel coords

    @param clippingParameterList: list whichcomprises 4 tuples of x,y coordinates
    """
    xs = sorted(point[0] for point in unorderdPixels)
    ys = sorted(point[1] for point in unorderdPixels)
    ordered = [0, 0, 0, 0]
    for point in unorderdPixels:
        # a coordinate counts as "low" if it matches one of the two smallest
        # sorted values, and "high" if it matches one of the two largest
        x_low = point[0] in (xs[0], xs[1])
        x_high = point[0] in (xs[2], xs[3])
        y_low = point[1] in (ys[0], ys[1])
        y_high = point[1] in (ys[2], ys[3])
        if x_low and y_high:
            ordered[0] = point     # llc: left column, upper y values
        elif x_low and y_low:
            ordered[1] = point     # ulc: left column, lower y values
        elif x_high and y_low:
            ordered[2] = point     # urc: right column, lower y values
        elif x_high and y_high:
            ordered[3] = point     # lrc: right column, upper y values
    return ordered
""" Functions for creating the commands for command line """
""" function: addGCPToTiff
@param - gcPoints {list of gcp} - list of ground control points
@param - srid {Integer} - epsg code of coordiante system
@param - srcPath {String}
@param - destPath {String}
@return - command {String}

Add the ground control points via gdal_translate to the src tiff file """
def addGCPToTiff(gcPoints,srs,srcPath,destPath):
    # base command with the target spatial reference system
    base = "gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:%s "%srs
    # one "-gcp <pixel coords>, <georef coords> " fragment per control point
    gcp_part = "".join("-gcp "+str(point)+" " for point in gcPoints)
    return base + gcp_part + str(srcPath)+" "+str(destPath)
""" function: georeferenceTiff
@param - shapefilePath {String}
@param - srid {Integer} - epsg code of coordiante system
@param - srcPath {String}
@param - destPath {String}
@param - tyoe {String} - if 'fast' there is less compression
@return - command {String}

Georeferencing via gdalwarp """
def georeferenceTiff(shapefilePath, srid, srcPath, destPath, type=None):
    # BUG FIX: the original assigned `command` only inside the type == 'fast'
    # branch but returned it unconditionally, so any other value of `type`
    # (including the default None) raised UnboundLocalError at the return.
    # Unsupported types now fail fast with a clear ValueError instead.
    if type == 'fast':
        command = "gdalwarp --config GDAL_CACHEMAX 500 -wm 500 -overwrite -co TILED=YES -cutline %s \
 -crop_to_cutline -t_srs epsg:%s %s %s"%(shapefilePath,srid,srcPath,destPath)
        return command
    raise ValueError("Unsupported georeference type: %r" % (type,))
|
[
"[email protected]"
] | |
57c8c4f7a53557e403719802170a2e4a7bd660c6
|
9ecd7568b6e4f0f55af7fc865451ac40038be3c4
|
/tianlikai/hubei/enshi_zhongbiao.py
|
aa1eb42ebd5cbeb6d019ac1072c18bf552fa29cc
|
[] |
no_license
|
jasonTLK/scrapy
|
f5ac6e575e902c077a07dc0eb9d228506f1a173f
|
2de8245fbc8731cfd868bbd91168e26271045300
|
refs/heads/master
| 2021-01-20T04:22:23.080864 | 2017-04-28T07:46:29 | 2017-04-28T07:46:29 | 89,681,374 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,967 |
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# Hubei Enshi public procurement / bidding website
# Winning-bid (zhongbiao) announcements
class hz_gov_Spider(scrapy.Spider):
    """Scrapy spider (Python 2 codebase) that crawls winning-bid notices from
    eszggzy.cn and inserts them into MongoDB."""
    name = "enshi_zhongbiao.py"
    allowed_domains = ["eszggzy.cn"]
    # Swap in a random User-Agent middleware; proxy/retry middlewares are
    # present but disabled.
    custom_settings = {
        "DOWNLOADER_MIDDLEWARES": {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'middlewares.useragent_middleware.RandomUserAgent': 400,
            # 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
            # 'middlewares.proxy_middleware.ProxyMiddleware': 250,
            # 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            # 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
            # 'middlewares.timestamp_middleware.TimestampMiddleware': 120
        }
    }

    def start_requests(self):
        """Generate paged listing requests for both announcement sections."""
        urls = [
            "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=",
            "http://www.eszggzy.cn/TPFront/jyxx/070002/070002003/?Paging=",
        ]
        # hard-coded page count for each section above
        pages = [21, 20]
        for i in range(len(urls)):
            num=1
            while num<=pages[i]:
                url =urls[i]+str(num)
                num+=1
                # print url
                yield Request(url=url,callback=self.parse)

    # start_urls = [
    #     "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=1"
    # ]

    def parse(self, response):
        """Extract notice titles and links from a listing page.

        NOTE(review): `urls[i+4]` skips the first four hrefs while `names`
        is indexed from 0 — confirm this offset matches the page layout.
        The local variable `str` also shadows the builtin.
        """
        selector = Selector(response)
        names = selector.xpath("//td[@align='left']//a/@title").extract()
        urls = selector.xpath("//td[@align='left']//a/@href").extract()
        print len(names),len(urls)
        for i in range(len(names)):
            url = "http://www.eszggzy.cn" + "".join(urls[i+4])
            str = "".join(names[i]) + "," + url
            print str
            yield Request(url=url, callback=self.parse2, meta={"info": str})

    def parse2(self, response):
        """Parse a notice detail page and insert the record into MongoDB."""
        infos = response.meta["info"]
        items = biding_gov()
        items["url"] = response.url
        # meta "info" is "<name>,<url>"; keep only the name part
        items["name"] = "".join(infos).split(",")[0]
        items["info"] = ""
        items["create_time"] = datetime.datetime.now()
        items["update_time"] = datetime.datetime.now()
        page_info = "".join(response.body)
        # page bodies are GBK-encoded
        items["info"] = "".join(page_info).decode('gbk')
        db = MongodbHandle("172.20.3.10 ", 27017, "spiderBiding")
        db.get_insert(
            "bid_hubei_EnShi",
            {
                "url": items["url"],
                "name": items["name"],
                "info": items["info"],
                "create_time": items["create_time"],
                "update_time": items["update_time"]
            }
        )
        print items["url"]
        print items["name"]
|
[
"[email protected]"
] | |
8bf02c256d73472a61e065933f71d8e075957de5
|
a3d1e8a67ed43e1bea59180cc51c49f25a961a49
|
/scripts/dg2dotty
|
1aee7a8c68572dcdabdf99da9567433445ae7d8b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
WladimirSidorenko/TextNormalization
|
38b076d88a2de40dae72dc8b4096e354b774f2f4
|
ac645fb41260b86491b17fbc50e5ea3300dc28b7
|
refs/heads/master
| 2020-04-14T16:48:42.541883 | 2019-09-29T23:38:28 | 2019-09-29T23:38:28 | 163,962,092 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,209 |
#!/usr/bin/env python2.7
# -*- coding: utf-8; -*-
"""
Utility for representing DG trees in DOTTY format.
Read a DG tree in CONLL-2009 format and output the read tree in GRAPHVIZ
format.
Input format (meaning of columns):
ID FORM LEMMA PLEMMA POS PPOS FEAT PFEAT HEAD PHEAD DEPREL PDEPREL FILLPRED PRED APREDs
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
Output format (meaning of columns):
"""
##################################################################
# Importing Libraries
import os
import re
import sys
from alt_argparse import argparser
from alt_fio import AltFileInput, AltFileOutput
##################################################################
# Variables and Constants
FIELDSEP = re.compile('\t')    # CONLL column separator (not used by the main loop, which uses line.split())
fields = []                    # per-line CONLL columns
FEATURESEP = re.compile('\|')  # separator between morphological features
features = []                  # features of the current token
QUOTE_RE = re.compile('(")')   # used by escape_quote() to backslash-escape quotes
NODE_STYLE = 'color="gray",fillcolor="palegreen",style="filled"'   # default GraphViz node style
FEAT_LABEL = ' [label="FEAT"];'                                    # edge label for feature nodes
FEAT_STYLE = 'shape=box,fillcolor="lightblue",style="filled,rounded",'  # style for feature nodes
w_id = 0       # current word id (column 0)
form = ''      # surface form (column 1)
lemma = ''     # predicted lemma (column 3)
pos = ''       # predicted POS tag (column 5)
p_id = 0       # predicted head id (column 9)
rel = ''       # predicted dependency relation (column 11)
edges = []     # collected GraphViz edge statements, printed at the end
f_id = -1      # negative ids for synthetic feature nodes, decremented per node
##################################################################
# Methods
def escape_quote(iline):
    """Return *iline* with every double quote prefixed by a backslash."""
    return iline.replace('"', '\\"')
##################################################################
# Processing Arguments
# NOTE(review): this description looks copy-pasted from another tool; the
# script converts CONLL dependency trees to GraphViz, not sentence boundaries.
argparser.description="""Utility for determining sentence boundaries."""
argparser.add_argument("-c", "--esc-char", help = """escape character which should
precede lines with meta-information""", nargs = 1, type = str, \
default = os.environ.get("SOCMEDIA_ESC_CHAR", ""))
args = argparser.parse_args()
##################################################################
# Main Body
foutput = AltFileOutput(encoding = args.encoding, \
                        flush = args.flush)
finput = AltFileInput(*args.files, \
                      skip_line = args.skip_line, \
                      print_func = foutput.fprint, \
                      errors = "replace")
# print graph header ({{ is an escaped literal brace for str.format)
foutput.fprint("""
graph dg {{
forcelabels=true
size="14";
node [{:s}];
0 [label="Root"];
""".format(NODE_STYLE))
for line in finput:
    # skip meta-information lines marked with the escape character
    if line and line[0] == args.esc_char:
        continue
    # interpret fields
    fields = line.split()
    if not len(fields):
        continue
    # columns 3/5/9/11 are the predicted (P*) CONLL-2009 columns
    w_id, form, lemma = fields[0], fields[1], fields[3]
    pos, p_id, rel = fields[5], fields[9], fields[11]
    features = FEATURESEP.split(fields[7])
    # add node to the graph, labelled "lemma\n(form)"
    foutput.fprint(w_id, ' [label="' + escape_quote(lemma) + \
                   "\\n(" + escape_quote(form) + ')"];')
    # output features as additional node which will be connected to the current
    # one; synthetic feature nodes get negative ids so they never collide
    if features:
        foutput.fprint(f_id, ' [{:s} label="'.format(FEAT_STYLE) + \
                       escape_quote(";\\n".join(features)) + ';"];')
        edges.append(w_id + " -- " + str(f_id) + FEAT_LABEL)
        f_id -= 1
    # remember edge (head -- dependent, labelled with the relation)
    edges.append(p_id + " -- " + w_id + ' [label="' + rel + '"];')
# output edges and close the graph
foutput.fprint('\n'.join(edges), "\n}")
|
[
"[email protected]"
] | ||
e61d248ab9d60f7194933ccc8cf31c297f485cc2
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/pytorch/dimenet/modules/envelope.py
|
b9d89620f674a562a255f52694e36235733374cc
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 |
Apache-2.0
| 2023-09-14T15:48:24 | 2018-04-20T14:49:09 |
Python
|
UTF-8
|
Python
| false | false | 610 |
py
|
import torch.nn as nn
class Envelope(nn.Module):
    """Smooth polynomial cutoff envelope (DimeNet).

    Evaluates ``u(x)/x = 1/x + a*x^(p-1) + b*x^p + c*x^(p+1)`` with
    coefficients chosen so the envelope and its first derivatives vanish
    smoothly at the cutoff.
    """

    def __init__(self, exponent):
        """exponent -- envelope exponent; internally p = exponent + 1."""
        super(Envelope, self).__init__()
        p = exponent + 1
        self.p = p
        self.a = -(p + 1) * (p + 2) / 2
        self.b = p * (p + 2)
        self.c = -p * (p + 1) / 2

    def forward(self, x):
        """Return the envelope function divided by r, evaluated elementwise."""
        low = x.pow(self.p - 1)   # x^(p-1)
        mid = low * x             # x^p
        high = mid * x            # x^(p+1)
        return 1 / x + self.a * low + self.b * mid + self.c * high
|
[
"[email protected]"
] | |
c85c091a3229318315dafe45d892f4fe27ad63c5
|
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
|
/source/All_Solutions/0480.滑动窗口中位数/0480-滑动窗口中位数.py
|
b6a27a3906d116af6ae8695a4eafea53559a93c4
|
[
"MIT"
] |
permissive
|
zhangwang0537/LeetCode-Notebook
|
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
|
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
|
refs/heads/master
| 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 |
MIT
| 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null |
UTF-8
|
Python
| false | false | 940 |
py
|
import bisect
class Solution:
    def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
        """Return the median of every contiguous length-k window of nums.

        Keeps a sorted copy of the current window; each slide inserts the
        incoming value and removes the outgoing one via binary search.
        Time O(n*k) worst case (list shifts dominate), space O(n + k).
        """
        medians = []
        if not nums or not k:
            return medians

        window = sorted(nums[:k])
        mid = k // 2

        def current_median():
            # Odd k: middle element; even k: mean of the two middle elements.
            if k % 2:
                return window[mid]
            return (window[mid] + window[mid - 1]) / 2

        medians.append(current_median())
        for left in range(len(nums) - k):
            bisect.insort(window, nums[left + k])
            # bisect() points just past the last copy of the outgoing value,
            # so the element at index-1 is guaranteed equal to it.
            del window[bisect.bisect(window, nums[left]) - 1]
            medians.append(current_median())
        return medians
|
[
"[email protected]"
] | |
ecc631a48f59fcc28412207e3d56e26f26d614f1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/128/usersdata/222/33411/submittedfiles/al6.py
|
a4e5c49916c0a47643dc35834d5f8c7cd5aca7c0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
# -*- coding: utf-8 -*-
# Primality check: read an integer and print 'Primo' when it has no proper
# divisor in [2, a), otherwise print each divisor and 'Não primo'.
a=int(input('Digite a:'))
contador=0
for i in range(2,a,1):
    # Bug fix: the original tested `n%i` but `n` was never defined (NameError);
    # the value being tested is `a`.
    if a%i==0:
        contador=contador+1
        print(i)
for i in range(2,a,1):
    # Bug fix: original was `n%1==0` — undefined `n` and `%1` is always 0.
    # NOTE(review): this loop duplicates the one above (divisors are printed
    # and counted twice); kept to preserve the submission's structure.
    if a%i==0:
        contador=contador+1
        print(i)
if contador==0:
    print('Primo')
else:
    print('Não primo')
|
[
"[email protected]"
] | |
ef232dab5bc20bf3a6e6d2877ede262ab60bd9c8
|
99249dad36df26a712ae8d900041d53acf3901ea
|
/settings/configurations/LCLS_settings.py
|
0f4d71a15a5f657650c92536b3cfb5a54b7d163c
|
[
"MIT"
] |
permissive
|
bopopescu/Lauecollect
|
f1f79c2cc5ff106df0dedbd6939ec92630d2b305
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
refs/heads/master
| 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 |
MIT
| 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
# Beamline settings script; the MicroscopeCamera/WideFieldCamera/sample/...
# objects are presumably provided by the application that executes this file.
# Microscope camera geometry and network address
MicroscopeCamera.ImageWindow.Center = (679.0, 512.0)
MicroscopeCamera.Mirror = False
MicroscopeCamera.NominalPixelSize = 0.000517
MicroscopeCamera.Orientation = -90
MicroscopeCamera.camera.IP_addr = '172.21.46.202'
MicroscopeCamera.x_scale = -1.0
MicroscopeCamera.y_scale = 1.0
MicroscopeCamera.z_scale = -1.0
# Wide-field camera geometry and network address
WideFieldCamera.ImageWindow.Center = (738.0, 486.0)
WideFieldCamera.Mirror = False
WideFieldCamera.NominalPixelSize = 0.002445
WideFieldCamera.Orientation = -90
WideFieldCamera.camera.IP_addr = '172.21.46.70'
WideFieldCamera.x_scale = -1.0
WideFieldCamera.y_scale = 1.0
WideFieldCamera.z_scale = -1.0
# Instrument network endpoints
laser_scope.ip_address = 'femto10.niddk.nih.gov:2000'
rayonix_detector.ip_address = '172.21.46.133:2222'
# Sample stage motor mapping and rotation center
sample.phi_motor_name = 'SamplePhi'
sample.rotation_center = (-0.7938775, -0.31677586081529113)
sample.x_motor_name = 'SampleX'
sample.xy_rotating = False
sample.y_motor_name = 'SampleY'
sample.z_motor_name = 'SampleZ'
timing_system.ip_address = '172.21.46.207:2000'
xray_scope.ip_address = 'pico21.niddk.nih.gov:2000'
|
[
"[email protected]"
] | |
09f7ff38257927f817ca76e38b02d8f4f94da9fd
|
730707fdefc2934929e1309cfbb0484d62b4bc34
|
/backend/home/migrations/0001_load_initial_data.py
|
bc0ac08ee26bc6af244f1c1862878b762c7d3a2e
|
[] |
no_license
|
crowdbotics-apps/tpl-account-securty-27301
|
885f78b6256e3da6733c534cb85b89f797476e5f
|
44a580b64f14f7598b9e0c7a513976795992b15d
|
refs/heads/master
| 2023-04-26T15:38:35.791087 | 2021-05-23T22:55:42 | 2021-05-23T22:55:42 | 370,173,419 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 561 |
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site (pk=1) at this app's domain.

    Uses the historical model via ``apps`` as migrations require.
    """
    site_model = apps.get_model("sites", "Site")
    defaults = {"name": "tpl account securty page"}
    domain = "tpl-account-securty-27301.botics.co"
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: seed the default Site row with this deployment's domain."""
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"[email protected]"
] | |
557dc77ea9e99dbf933860debf7334305d13e6aa
|
eff5f0a2470c7023f16f6962cfea35518ec0b89c
|
/Storage_Xs and Os Champion.py
|
7d81e185c2aae6377e67314d2e8577330d0932e8
|
[] |
no_license
|
olegJF/Checkio
|
94ea70b9ee8547e3b3991d17c4f75aed2c2bab2f
|
fc51a7244e16d8d0a97d3bb01218778db1d946aa
|
refs/heads/master
| 2021-01-11T00:46:42.564688 | 2020-03-02T13:36:02 | 2020-03-02T13:36:02 | 70,490,008 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,829 |
py
|
# -*- coding: utf-8 -*-
def x_and_o(grid, mark):
    """Choose the next tic-tac-toe move for `mark` ('X' or 'O').

    grid  -- three strings of 'X'/'O'/'.' (rows of the board)
    mark  -- the mark this player places
    Returns a (row, col) tuple for the chosen cell.
    """
    X_vs_O = {'X':'O', 'O':'X'}
    def winner(grid, mark):
        """Return the index of a cell that immediately wins (for either
        player) on one of the eight lines, or False if none exists."""
        WINNER_WAYS = ((0, 1, 2), (3, 4, 5),
                       (6, 7, 8), (0, 3, 6),
                       (1, 4, 7), (2, 5, 8),
                       (0, 4, 8), (2, 4, 6)
                       )
        for row in WINNER_WAYS:
            line = grid[row[0]]+grid[row[1]]+grid[row[2]]
            if line.count('.') == 1:
                # two identical marks plus one gap: fill (win) or block it
                if line.count(mark) == 2 or line.count(X_vs_O[mark]) == 2:
                    return row[line.find('.')]
        return False
    # cell preference order: center, corners, then edges
    BEST_MOVES = [4, 0, 2, 6, 8, 1, 3, 5, 7]
    # linear cell index -> (row, col)
    FIELD = {0:(0, 0), 1:(0, 1), 2:(0, 2),
             3:(1, 0), 4:(1, 1), 5:(1, 2),
             6:(2, 0), 7:(2, 1), 8:(2, 2)
             }
    grid = ''.join(grid)
    dot_cnt = grid.count('.')
    is_first_move = True if dot_cnt == 9 else False
    if is_first_move: return FIELD[4]
    is_second_move = True if dot_cnt == 8 else False
    is_center_free = True if grid[4] =='.' else False
    if is_second_move and is_center_free:
        return FIELD[4]
    elif is_second_move:
        for i in BEST_MOVES:
            if grid[i] == '.': return FIELD[i]
    cnt_my_mark = grid.count(mark)
    cnt_enemy_mark = grid.count(X_vs_O[mark])
    # equal counts means we moved first in this game
    was_my_first_move = True if cnt_my_mark == cnt_enemy_mark else False
    legal_moves = [ i for i in range(9) if grid[i] =='.']
    if was_my_first_move:
        if dot_cnt == 7:
            # our second move as first player: grab any free corner
            for i in (0, 2, 8, 6):
                if grid[i] == '.': return FIELD[i]
        is_winner = winner(grid, mark)
        if is_winner is not False: return FIELD[is_winner]
        if dot_cnt == 5:
            # build a double threat: take a corner on an opposite-corner pair
            # whose line already holds exactly one of our marks
            lines = ((0, 1, 2), (6, 7, 8),
                     (0, 3, 6), (2, 5, 8))
            for x, y in ([0, 8], [2, 6]):
                if x in legal_moves and y in legal_moves:
                    for corner in (x,y):
                        for line in lines:
                            if corner in line:
                                row = grid[line[0]]+grid[line[1]]+grid[line[2]]
                                cnt_mark = row.count(mark)
                                cnt_dot = row.count('.')
                                if cnt_mark ==1 and cnt_dot ==2:
                                    return FIELD[corner]
        for move in BEST_MOVES:
            if move in legal_moves: return FIELD[move]
    else:
        is_winner = winner(grid, mark)
        if is_winner is not False: return FIELD[is_winner]
        if dot_cnt == 6 and grid[4] == mark:
            # as second player holding the center, prefer edges to avoid forks
            for i in (1, 3, 5, 7):
                if i in legal_moves: return FIELD[i]
        for move in BEST_MOVES:
            if move in legal_moves: return FIELD[move]
# ad-hoc demo invocation
print(x_and_o(( "XO.", ".X.", "..O"), "X"))
#print(winner("XO..X....", 'X'))
|
[
"[email protected]"
] | |
bc38069aef7b32c7c351685d0b2122f0d604529e
|
2f5d221d5cd423f07da50ed8be9668d811e550b4
|
/airtest/core/ios/fake_minitouch.py
|
b5eb7986af962fc31648c001e4259ad16c8af15a
|
[
"Apache-2.0"
] |
permissive
|
Pactortester/Airtest
|
d1db25498591992dee525b2ceeb45de9239b319f
|
18e57ae2bbde3f2b95c32f09e214fdf4aec41330
|
refs/heads/master
| 2022-06-03T22:52:54.939200 | 2020-06-29T01:01:30 | 2020-06-29T01:01:30 | 275,080,743 | 1 | 0 |
Apache-2.0
| 2020-06-26T05:28:02 | 2020-06-26T05:28:02 | null |
UTF-8
|
Python
| false | false | 1,979 |
py
|
# coding=utf-8
import subprocess
import os
import re
import struct
import logging
from airtest.utils.logger import get_logger
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
class fakeMiniTouch(object):
    """Minimal minitouch stand-in for iOS devices.

    Replays down/move/up touch events onto ``dev``: a short down→up becomes a
    tap (``dev.touch``), a down→move→up beyond the threshold becomes a swipe
    (``dev.swipe``).
    """

    # Kept for backward compatibility with code reading these class attributes;
    # real state lives on the instance (see __init__).
    lastDown = {'x': None, 'y': None}
    recentPoint = {'x': None, 'y': None}

    def __init__(self, dev):
        self.dev = dev
        # distance (in pixels) separating a tap from a swipe
        self.swipe_threshold = 10
        # Bug fix: operate() used to mutate the class-level dicts above via
        # self.lastDown['x'] = ..., so every instance shared touch state.
        # Give each instance its own copies.
        self.lastDown = {'x': None, 'y': None}
        self.recentPoint = {'x': None, 'y': None}

    def setup(self):
        """No-op: kept for interface parity with the real minitouch backend."""
        pass

    def operate(self, operate_arg):
        """Consume one touch event dict ({'type': 'down'|'move'|'up', ...})."""
        # TODO FIX IPHONT TOUCH
        # start down
        if operate_arg['type'] == 'down':
            self.lastDown['x'] = operate_arg['x']
            self.lastDown['y'] = operate_arg['y']
        # mouse up
        if operate_arg['type'] == 'up':
            # in case they may be None (up without a preceding down)
            if self.lastDown['x'] is None or self.lastDown['y'] is None:
                return
            # has recent point
            if self.recentPoint['x'] and self.recentPoint['y']:
                # swipe need to move longer than the tap/swipe threshold
                if abs(self.recentPoint['x'] - self.lastDown['x']) > self.swipe_threshold \
                        or abs(self.recentPoint['y'] - self.lastDown['y']) > self.swipe_threshold:
                    self.dev.swipe((self.lastDown['x'], self.lastDown['y']),
                                   (self.recentPoint['x'], self.recentPoint['y']))
                else:
                    self.dev.touch((self.lastDown['x'], self.lastDown['y']))
            else:
                self.dev.touch((self.lastDown['x'], self.lastDown['y']))
            # clear infos
            self.lastDown = {'x': None, 'y': None}
            self.recentPoint = {'x': None, 'y': None}
        if operate_arg['type'] == 'move':
            self.recentPoint['x'] = operate_arg['x']
            self.recentPoint['y'] = operate_arg['y']
# No standalone CLI behavior; the module is import-only.
if __name__ == '__main__':
    pass
|
[
"[email protected]"
] | |
7066f6fd5882ec68a145a9b5116e7c5eff2d33f2
|
a854f81f3ca0d6e6d6cf60662d05bc301465e28c
|
/backend/booking/migrations/0001_initial.py
|
4d479654287d6f6f7b495a5050811e171d37cb04
|
[] |
no_license
|
crowdbotics-apps/lavadoras-19637
|
7f99e2046a6a92cdcfaec052eb9eadfd807193fd
|
577d0da2626867a8a1b27d2df386c8598e4adc6d
|
refs/heads/master
| 2022-12-02T21:15:17.103593 | 2020-08-18T08:28:47 | 2020-08-18T08:28:47 | 288,397,803 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,317 |
py
|
# Generated by Django 2.2.15 on 2020-08-18 08:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('location', '0001_initial'),
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookingTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('distance', models.FloatField()),
('price', models.FloatField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_depart', models.DateTimeField()),
('timestamp_arrive', models.DateTimeField()),
('tip', models.FloatField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_driver', to='taxi_profile.DriverProfile')),
('dropoff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_dropoff', to='location.MapLocation')),
('pickup', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_pickup', to='location.MapLocation')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('booking', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_booking', to='booking.BookingTransaction')),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_user', to='taxi_profile.UserProfile')),
],
),
]
|
[
"[email protected]"
] | |
75d146601fcfb74873d0571bc7d1e05b92491d12
|
8f0b0ec0a0a2db00e2134b62a1515f0777d69060
|
/scripts/study_case/ID_32/0504_softmax_regression.py
|
5d1daab24d438285e89be0a81cd2092dde31f122
|
[
"Apache-2.0"
] |
permissive
|
Liang813/GRIST
|
2add5b4620c3d4207e7661eba20a79cfcb0022b5
|
544e843c5430abdd58138cdf1c79dcf240168a5f
|
refs/heads/main
| 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 |
Apache-2.0
| 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null |
UTF-8
|
Python
| false | false | 1,389 |
py
|
import myutil as mu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import sys
sys.path.append("/data")
torch.manual_seed(1)
x_train = [[1, 2, 1, 1],
[2, 1, 3, 2],
[3, 1, 3, 4],
[4, 1, 5, 5],
[1, 7, 5, 5],
[1, 2, 5, 6],
[1, 6, 6, 6],
[1, 7, 7, 7]]
y_train = [2, 2, 2, 1, 1, 1, 0, 0]
x_train = torch.FloatTensor(x_train)
y_train = torch.LongTensor(y_train)
mu.log("x_train", x_train)
mu.log("y_train", y_train)
y_one_hot = torch.zeros(8, 3)
y_one_hot.scatter_(dim=1, index=y_train.unsqueeze(dim=1), value=1)
mu.log("y_one_hot", y_one_hot)
W = torch.zeros((4, 3), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([W, b], lr=0.1)
nb_epoches = 2000
mu.plt_init()
'''inserted code'''
import sys
sys.path.append("/data")
from scripts.utils.torch_utils import TorchScheduler
scheduler = TorchScheduler(name="PyTorchDeepLearningStart.0504_softmax_regression")
'''inserted code'''
while True:
hypothesis = F.softmax(x_train.matmul(W) + b, dim=1)
cost = (y_one_hot * -torch.log(hypothesis)).sum().mean()
optimizer.zero_grad()
cost.backward()
optimizer.step()
'''inserted code'''
scheduler.loss_checker(cost)
scheduler.check_time()
'''inserted code'''
mu.plt_show()
mu.log("W", W)
mu.log("b", b)
|
[
"[email protected]"
] | |
f3342ae253a6c3ea4cdf0a8b6733c66468df32a0
|
b47a907e824b52a6ee02dfb6387d24fa4d7fe88f
|
/config/settings.py
|
711faa6f8b40f97ba26f9110ae9b2a5e620c989a
|
[] |
no_license
|
hiroshi-higashiyama/DJANGO-KAKEIBO
|
413a883fdef2571cacbd6c8679e63a6aecab7ae9
|
564c6047fcc6f6bb4a45b2eec121df619d158952
|
refs/heads/master
| 2022-12-29T19:53:15.186934 | 2020-09-21T01:04:10 | 2020-09-21T01:04:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,180 |
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '3!7$0+ew+1s-)tt%ex9gwqtf_(oq==%7celkb+i7g01_ehy&im'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'kakeibo',
    'bootstrapform',
    'django.contrib.humanize',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Digit grouping size used by django.contrib.humanize (e.g. 1,000,000)
NUMBER_GROUPING = 3
|
[
"[email protected]"
] | |
6e5b22d94e41c54bed477e9c213add68291fd728
|
d85cc746428e787254455c80b66a7309aa715e24
|
/demo_odoo_tutorial/models/models.py
|
fc9e8beafbb712f17fd48d60021152bfda775a67
|
[
"MIT"
] |
permissive
|
AllenHuang101/odoo-demo-addons-tutorial
|
2ef7d47432a2530f1e704f86cba78e3e975ca0f3
|
e719594bc42e3a9b273f5b37980ac61773702ab9
|
refs/heads/master
| 2023-03-28T03:37:46.338483 | 2021-03-29T08:44:22 | 2021-03-29T08:44:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,829 |
py
|
from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
class DemoOdooTutorial(models.Model):
    """Tutorial model demonstrating tracked fields, computed fields,
    onchange handlers, SQL and Python constraints."""
    _name = 'demo.odoo.tutorial'
    _description = 'Demo Odoo Tutorial'
    _inherit = ['mail.thread', 'mail.activity.mixin'] # track_visibility
    name = fields.Char('Description', required=True)
    # Demonstrates track_visibility='always' vs track_visibility='onchange'.
    is_done_track_onchange = fields.Boolean(
        string='Is Done?', default=False, track_visibility='onchange')
    name_track_always = fields.Char(string="track_name", track_visibility='always')
    # Bug fix: the defaults were `fields.Datetime.now()` (with parentheses),
    # which is evaluated once at class load, freezing the default timestamp
    # for the server's lifetime. Pass the callable so it is evaluated per
    # record creation.
    start_datetime = fields.Datetime('Start DateTime', default=fields.Datetime.now)
    stop_datetime = fields.Datetime('End Datetime', default=fields.Datetime.now)
    field_onchange_demo = fields.Char('onchange_demo')
    field_onchange_demo_set = fields.Char('onchange_demo_set', readonly=True)
    # float digits
    # field tutorial
    input_number = fields.Float(string='input number', digits=(10,3))
    field_compute_demo = fields.Integer(compute="_get_field_compute") # readonly
    _sql_constraints = [
        ('name_uniq', 'unique(name)', 'Description must be unique'),
    ]
    @api.constrains('start_datetime', 'stop_datetime')
    def _check_date(self):
        """Reject records whose start is after their stop datetime."""
        for data in self:
            if data.start_datetime > data.stop_datetime:
                # NOTE(review): the message reads like a code fragment; a
                # user-facing sentence would be clearer.
                raise ValidationError(
                    "data.stop_datetime > data.start_datetime"
                )
    @api.depends('input_number')
    def _get_field_compute(self):
        """Computed field: input_number scaled by 1000."""
        for data in self:
            data.field_compute_demo = data.input_number * 1000
    @api.onchange('field_onchange_demo')
    def onchange_demo(self):
        """Mirror field_onchange_demo into the read-only companion field."""
        if self.field_onchange_demo:
            self.field_onchange_demo_set = 'set {}'.format(self.field_onchange_demo)
|
[
"[email protected]"
] | |
271e0a82482eb25eaca4b7f12e7efeb08508fb7a
|
9206e405e9be5f80a08e78b59d1cb79c519ae515
|
/algorithms/codeforces/the_number_of_even_pairs/main.py
|
7b7aac218751e1de472854d40e92a53218a4c619
|
[] |
no_license
|
mfbx9da4/mfbx9da4.github.io
|
ac4e34f0e269fb285e4fc4e727b8564b5db1ce3b
|
0ea1a0d56a649de3ca7fde2d81b626aee0595b2c
|
refs/heads/master
| 2023-04-13T22:15:19.426967 | 2023-04-12T12:14:40 | 2023-04-12T12:14:40 | 16,823,428 | 2 | 0 | null | 2022-12-12T04:36:08 | 2014-02-14T01:30:20 |
SCSS
|
UTF-8
|
Python
| false | false | 738 |
py
|
"""
"""
from math import factorial
# Small I/O and formatting helpers shared by competitive-programming solutions.
def int_as_array(num): return list(map(int, [y for y in str(num)]))  # 123 -> [1, 2, 3]
def array_as_int(arr): return int(''.join(map(str, arr)))            # [1, 2, 3] -> 123
def read_int(): return int(input())                                  # read one int from stdin
def read_array(): return list(map(int, input().split(' ')))          # read space-separated ints
def array_to_string(arr, sep=' '): return sep.join(map(str, arr))
def matrix_to_string(arr, sep=' '): return '[\n' + '\n'.join(
    [sep.join(map(str, row)) for row in arr]) + '\n]'
def combine(n, r):
    """Return the binomial coefficient C(n, r) as a float (0 when n < r).

    Bug fix: the original returned factorial(n)/factorial(n-r) * (1/r),
    i.e. P(n, r)/r, which only equals C(n, r) for r == 2 (the sole value
    used below). The correct formula divides by r!.
    """
    try:
        return factorial(n) / (factorial(n - r) * factorial(r))
    except ValueError:
        # factorial of a negative number (n < r): there are no ways to choose.
        return 0


def solve(N, M):
    """Count pairs with an even sum: two evens (N available) or two odds (M)."""
    choose_evens = combine(N, 2)
    choose_odds = combine(M, 2)
    return int(choose_evens + choose_odds)
# Script entry point: read "N M" from stdin and print the pair count.
N, M = read_array()
print(solve(N, M))
|
[
"[email protected]"
] | |
fb483adff09210c3a8dea90d203b5b070f3768fb
|
84379e15e54ba79b7e63c1fceecf712b46f22977
|
/apps/decks/migrations/0016_auto_20181011_1715.py
|
2ac96bd86b326bc8447c68610a43fbba4554b4f0
|
[] |
no_license
|
CoderEnko007/HearthStoneStationBackend
|
a1d74c324233ebd617ad01df13bc609d1f1aa2f6
|
6cc92cb806f19f2a2a0596645028cfe2fa5895d6
|
refs/heads/master
| 2022-12-11T23:20:24.335737 | 2022-09-18T07:04:08 | 2022-09-18T07:04:08 | 144,392,864 | 0 | 0 | null | 2022-12-08T02:22:42 | 2018-08-11T14:40:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 831 |
py
|
# Generated by Django 2.0.4 on 2018-10-11 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: add real_game_count to decks/trending
    and relabel decks.game_count. Do not edit by hand."""
    dependencies = [
        ('decks', '0015_auto_20180928_1019'),
    ]
    operations = [
        migrations.AddField(
            model_name='decks',
            name='real_game_count',
            field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
        ),
        migrations.AddField(
            model_name='trending',
            name='real_game_count',
            field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
        ),
        migrations.AlterField(
            model_name='decks',
            name='game_count',
            field=models.IntegerField(blank=True, null=True, verbose_name='对局数'),
        ),
    ]
|
[
"[email protected]"
] | |
47693d0710e9c072cad944e857787701b982ce3d
|
0ea12ae71b3863a8279fd7200e61f5c40dc3dcb6
|
/image_bosch_detect_ssd_mobile.py
|
92fd277b6022c6d929dd37d5dae50ebf4863411d
|
[
"MIT"
] |
permissive
|
scrambleegg7/Traffic-Light-Classification
|
7dafb32f43bf1c73d62c645105cdc414ebb0cf44
|
2a9f6b8272866f289963905b162c35058ce6a234
|
refs/heads/master
| 2020-04-03T02:58:44.729521 | 2018-10-28T14:57:44 | 2018-10-28T14:57:44 | 154,973,271 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,201 |
py
|
import tensorflow as tf
import numpy as np
import datetime
import time
import os, sys
import cv2
from PIL import Image
import yaml
from glob import glob
try:
import matplotlib
matplotlib.use('TkAgg')
finally:
from matplotlib import pyplot as plt
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
class TrafficLightClassifier(object):
    """Wraps a frozen TensorFlow 1.x object-detection graph for inference.

    NOTE(review): the tf.Session opened in __init__ is never closed; callers
    are expected to keep one long-lived instance.
    """
    def __init__(self, frozen_model_file):
        """Load the frozen inference graph and cache its I/O tensor handles."""
        PATH_TO_MODEL = frozen_model_file
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            # Works up to here.
            with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            # Standard TF object-detection API tensor names.
            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            self.d_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            self.d_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            self.d_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            self.num_d = self.detection_graph.get_tensor_by_name('num_detections:0')
        self.sess = tf.Session(graph=self.detection_graph)
    def get_classification(self, img):
        """Run detection on one image; returns (boxes, scores, classes, num)."""
        # Bounding Box Detection.
        with self.detection_graph.as_default():
            # Expand dimension since the model expects image to have shape [1, None, None, 3].
            img_expanded = np.expand_dims(img, axis=0)
            (boxes, scores, classes, num) = self.sess.run(
                [self.d_boxes, self.d_scores, self.d_classes, self.num_d],
                feed_dict={self.image_tensor: img_expanded})
        return boxes, scores, classes, num
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 numpy array."""
    (im_width, im_height) = image.size
    flat_pixels = np.array(image.getdata())
    shaped = flat_pixels.reshape((im_height, im_width, 3))
    return shaped.astype(np.uint8)
def get_all_labels(input_yaml, riib=False):
    """ Gets all labels within label file
    Note that RGB images are 1280x720 and RIIB images are 1280x736.
    :param input_yaml: Path to yaml file
    :param riib: If True, change path to labeled pictures
    :return: images: Labels for traffic lights
    """
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted files (and deprecated); use yaml.safe_load for label files.
    # The file handle opened here is also never closed explicitly.
    images = yaml.load(open(input_yaml, 'rb').read())
    for i in range(len(images)):
        # Make image paths absolute relative to the yaml file's directory.
        images[i]['path'] = os.path.abspath(os.path.join(os.path.dirname(input_yaml), images[i]['path']))
        if riib:
            # Remap to the RIIB (raw) variants of the dataset images.
            images[i]['path'] = images[i]['path'].replace('.png', '.pgm')
            images[i]['path'] = images[i]['path'].replace('rgb/train', 'riib/train')
            images[i]['path'] = images[i]['path'].replace('rgb/test', 'riib/test')
            for box in images[i]['boxes']:
                # RIIB frames are 16 px taller (1280x736 vs 1280x720);
                # shift boxes down by 8 px to compensate.
                box['y_max'] = box['y_max'] + 8
                box['y_min'] = box['y_min'] + 8
    return images
def detect_label_images(input_yaml, output_folder=None):
    """
    Shows and draws pictures with labeled traffic lights.
    Can save pictures.
    :param input_yaml: Path to yaml file
    :param output_folder: If None, do not save picture. Else enter path to folder
    """
    PATH_TO_LABELS = r'data/bosch_label_map.pbtxt'
    NUM_CLASSES = 14
    frozen_model_file = "./models/bosch_freeze_tf1.3/frozen_inference_graph.pb"
    # Build the category index that maps numeric class ids to label names.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    print(category_index)
    # loading models
    tfc = TrafficLightClassifier(frozen_model_file)
    images = get_all_labels(input_yaml)
    if output_folder is not None and not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # NOTE(review): only the first 10 labelled images are processed ([:10]).
    for idx, image_dict in enumerate(images[:10]):
        image_path = image_dict['path']
        image_np = cv2.imread(image_path)
        if idx == 0:
            print(image_path)
        boxes, scores, classes, num = tfc.get_classification(image_np)
        # Draw up to 5 detections with score >= 0.3 onto the image in place.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=5,
            min_score_thresh=0.3,
            line_thickness=8)
        if idx % 10 == 0 and idx > 0:
            print("%d images processed. %s" % ((idx + 1), image_path))
        # Bug fix: only write the annotated image when an output folder was
        # requested; previously os.path.join(None, ...) raised a TypeError.
        # (Also dropped the unused `timestr` local.)
        if output_folder is not None:
            image_file = image_path.split("/")[-1]
            cv2.imwrite(os.path.join(output_folder, image_file), image_np)
if __name__ == '__main__':
    # CLI: argv[1] is the yaml label file; optional argv[2] is an output
    # folder for the annotated images.
    if len(sys.argv) < 2:
        print(__doc__)
        sys.exit(-1)
    label_file = sys.argv[1]
    output_folder = None if len(sys.argv) < 3 else sys.argv[2]
    detect_label_images(label_file, output_folder)
|
[
"[email protected]"
] | |
66ee42bf083364ea3975225cfe14efbc76c1c287
|
8760f182049d4caf554c02b935684f56f6a0b39a
|
/boar/facebook_connect/migrations/0002_profile_onetoone_to_user.py
|
ed79636574a8ae85a20dfee1a85138d28e7f7b15
|
[
"BSD-3-Clause"
] |
permissive
|
boar/boar
|
c674bc65623ee361af31c7569dd16c6eb8da3b03
|
6772ad31ee5bb910e56e650cc201a476adf216bc
|
refs/heads/master
| 2020-06-09T06:59:31.658154 | 2012-02-28T19:28:58 | 2012-02-28T19:28:58 | 1,734,103 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,352 |
py
|
from south.db import db
from django.db import models
from boar.facebook_connect.models import *
class Migration:
    """South migration: convert FacebookProfile.user from a plain ForeignKey
    to a OneToOneField (enforced via a unique constraint on user_id).

    The ``models`` dict below is South's auto-generated frozen ORM snapshot;
    do not edit it by hand.
    """
    def forwards(self, orm):
        # Changing field 'FacebookProfile.user'
        # (to signature: django.db.models.fields.related.OneToOneField(to=orm['auth.User'], unique=True))
        db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
        # Creating unique_together for [user] on FacebookProfile.
        db.create_unique('facebook_connect_facebookprofile', ['user_id'])
    def backwards(self, orm):
        # Deleting unique_together for [user] on FacebookProfile.
        db.delete_unique('facebook_connect_facebookprofile', ['user_id'])
        # Changing field 'FacebookProfile.user'
        # (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User']))
        db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook_connect.facebookprofile': {
            'Meta': {'unique_together': "(('user', 'uid'),)"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }
    complete_apps = ['facebook_connect']
|
[
"[email protected]"
] | |
12f6cd8c0d13ddb5246553b8acd29a2595a7a282
|
82ca64c6a819f3e2cb41057f2df9f758cedee28a
|
/BlockChain/venv/bin/python-config
|
ae068f06249b1f99eb784109dbf07bbc241050d5
|
[] |
no_license
|
seanxxxx/coinx
|
619a18f9b2d7f83076083055bfccf0c5e404f665
|
eb1a7ed430c546cf02ddcc79f436200b218d5244
|
refs/heads/master
| 2023-01-28T03:09:10.358463 | 2018-09-07T07:49:19 | 2018-09-07T07:49:19 | 146,564,986 | 0 | 1 | null | 2022-12-20T14:20:06 | 2018-08-29T07:52:37 |
Python
|
UTF-8
|
Python
| false | false | 2,363 |
#!/Users/xuanxu/PycharmProjects/BlockChain/venv/bin/python
# Standard CPython `python-config` helper: prints compiler/linker flags for
# embedding or extending this interpreter, driven by --option flags.
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
# Options that only exist on newer interpreters are added conditionally.
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print usage to stderr and terminate with the given exit code.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Each requested flag prints its answer on its own line, in argv order.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # Older interpreters used 'SO' instead of 'EXT_SUFFIX'.
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
|
[
"[email protected]"
] | ||
cd50fc8b715db9544fca346be9d2f59be5483792
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/FrogRiver_20200723134656.py
|
b53537eb14ce4472bd411f219e101697e4edb59b
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 225 |
py
|
def Frog(X, A):
    """Return the earliest second the frog can cross the river.

    A[t] is the position (1..X) where a leaf falls at second t.  The frog
    can cross once every position 1..X holds at least one leaf.

    :param X: target position (all of 1..X must be covered)
    :param A: list of leaf positions, indexed by second
    :return: earliest 0-based second at which 1..X are all covered,
        or -1 if the crossing never becomes possible.
    """
    # Track which positions still lack a leaf; done when the set empties.
    missing = set(range(1, X + 1))
    for second, position in enumerate(A):
        missing.discard(position)
        if not missing:
            return second
    return -1
print(Frog(5,[1,3,1,4,2,3,5,4]))
|
[
"[email protected]"
] | |
b37888fa6385baeb41115a66b55bec5886b14fbc
|
387ad3775fad21d2d8ffa3c84683d9205b6e697d
|
/testsuite/trunk/el/el_test_036.py
|
cfab23e5ff03600c188c22c0c83bb31985905443
|
[] |
no_license
|
kodiyalashetty/test_iot
|
916088ceecffc17d2b6a78d49f7ea0bbd0a6d0b7
|
0ae3c2ea6081778e1005c40a9a3f6d4404a08797
|
refs/heads/master
| 2020-03-22T11:53:21.204497 | 2018-03-09T01:43:41 | 2018-03-09T01:43:41 | 140,002,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,025 |
py
|
#!/usr/bin/env python
"""
(C) Copyright IBM Corp. 2008
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
file and program are licensed under a BSD style license. See
the Copying file included with the OpenHPI distribution for
full licensing terms.
Authors:
Jayashree Padmanabhan <[email protected]>
"""
import unittest
from openhpi import *
class TestSequence(unittest.TestCase):
    """
    runTest : EL test
    *
    * This test verifies the failure of oh_el_map_from_file when el == None
    *
    * Return value: 0 on success, 1 on failure
    """
    def runTest(self):
        # NOTE(review): the oh_el() instance is immediately discarded —
        # `el` is rebound to None below, which is the condition under test.
        el = oh_el()
        retc = None
        # test failure of oh_el_map_from_file with el==None
        el = None
        retc = oh_el_map_from_file(el, "./elTest.data")
        # The call must NOT report SA_OK when given a None event log.
        self.assertEqual (retc == SA_OK,False)
if __name__=='__main__':
    # Run the TestCase through unittest's standard CLI entry point.
    unittest.main()
|
[
"suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26"
] |
suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26
|
f6c327232f55a5253a539568cc9c8d10d656384d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02686/s642611525.py
|
914bb9607791cee5d353d156d9afb343faf395b3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 981 |
py
|
def main():
    # Decide whether the N bracket strings can be concatenated, in some
    # order, into one balanced sequence; prints "Yes" or "No".
    N = int(input())
    up_lines = []
    down_lines = []
    for i in range(N):
        s = input()
        # height: net '(' minus ')' count; bottom: lowest prefix depth.
        height = 0
        bottom = 0
        for c in s:
            if c == "(":
                height += 1
            else:
                height -= 1
            bottom = min(bottom, height)
        if height > 0:
            up_lines.append((bottom, height))
        else:
            # Net-closing strings are mirrored so they can be validated
            # symmetrically from the right-hand side.
            down_lines.append((bottom-height, -height))
    # Greedy order: strings whose deepest dip is shallowest go first.
    up_lines.sort(reverse=True, key=lambda line: line[0])
    down_lines.sort(reverse=True, key=lambda line: line[0])
    left = 0
    for bottom, height in up_lines:
        # The running depth must never go negative while consuming a string.
        if left + bottom < 0:
            print("No")
            return
        left += height
    right = 0
    for bottom, height in down_lines:
        if right + bottom < 0:
            print("No")
            return
        right += height
    # Both halves must meet at the same depth in the middle.
    if left == right:
        print("Yes")
    else:
        print("No")
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
c19012af2e5fe52651cc00b9775abc1d3e4e6ea1
|
a71d5838e292e2c0c7371f7fc7870c7018820ae1
|
/day03/03_pie.py
|
71c8ec39a03c52234f30d2660394d2f3d37a995f
|
[] |
no_license
|
skywalkerqwer/DataScience
|
be91541c3da383d15ee52d0101d2dbb0289c2fde
|
4cfd42f3a9795e295393cdb045852d46e99b6e59
|
refs/heads/master
| 2020-06-17T11:41:40.113864 | 2019-07-15T09:49:40 | 2019-07-15T09:49:40 | 195,913,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 449 |
py
|
"""
绘制饼状图
"""
import numpy as np
import matplotlib.pyplot as mp
labels = ['Python', 'JavaScript', 'C++', 'Java', 'PHP']
values = [26, 17, 21, 29, 11]
spaces = [0.05, 0.01, 0.01, 0.01, 0.01]
colors = ['dodgerblue', 'orangered', 'limegreen', 'violet', 'gold']
mp.figure('Pie Chart', facecolor='lightgray')
mp.title('Languages PR')
mp.pie(values, spaces, labels, colors, '%.1f%%', shadow=True, startangle=0, radius=1)
mp.legend()
mp.show()
|
[
"[email protected]"
] | |
0da39b2b6595f0a25f70e3735197ce8c382da45b
|
c7522a46908dfa0556ed6e2fe584fd7124ee5cdc
|
/ApplicationUsers/views.py
|
80dd9c405729e423ad243becbd6d5c57ca1b5930
|
[] |
no_license
|
stheartsachu/Eventlee
|
461cf35961a7f294229d6c611e58a09d9f4e1eb5
|
6b67dfc873203f1322c16664923ffe5a760d50ed
|
refs/heads/master
| 2022-11-13T14:48:39.097718 | 2020-06-30T04:54:45 | 2020-06-30T04:54:45 | 276,000,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,806 |
py
|
from django.shortcuts import render,HttpResponse,redirect,HttpResponseRedirect
from ApplicationUsers.form import ApplicationuserForm
# Create your views here.
from ApplicationUsers.models import users
def home(request):
    # Render the static landing page.
    return render(request,"index.html")
def Contact(request):
    # Render the static contact page.
    return render(request,"contact.html")
def gallery(request):
    # Render the static gallery page.
    return render(request,"gallery.html")
def signup(request):
    """Register a new user from the POSTed registration form.

    NOTE(review): form.is_valid() is never called before save(commit=False),
    and the password appears to be assigned as-is (no hashing) — confirm
    against the users model and fix via Django's auth machinery.
    """
    if request.method == 'POST':
        form = ApplicationuserForm(request.POST)
        f = form.save(commit=False)
        f.first_name = request.POST['fn']
        f.last_name = request.POST['ln']
        f.email = request.POST['email']
        # Password and its confirmation must match before the user is saved.
        if request.POST['p1'] == request.POST['p2']:
            f.password = request.POST['p2']
        else:
            return HttpResponse("<h1> Password and Confirm password is not same</h1>")
        f.status = True
        f.save()
        return HttpResponse("User is created sucessfully now, can login to website")
    return render(request, 'registration.html')
def login(request):
    """Authenticate a user against the users table by email + password.

    NOTE(review): passwords are compared in plain text and the bare except
    swallows all lookup errors — both should be reworked to use Django's
    auth framework.
    """
    if request.method == "POST":
        un = request.POST["email"]
        up = request.POST["password"]
        try:
            data = users.objects.get(email=un)
        except:
            # Unknown email (or any other lookup failure) -> error flag.
            return render(request, "login.html", {'emailerror': True})
        dp = data.password
        active = data.status
        if (active == False):
            return render(request, "login.html", {'activeerror': True})
        else:
            if (dp == up):
                # Mark the session as authenticated for later views.
                request.session['emailid'] = un
                request.session['Authentication'] = True
                return HttpResponse("You are sucessfullly login")
            else:
                return render(request, "login.html", {'passworderror': True})
    return render(request, "login.html")
|
[
"[email protected]"
] | |
7bed90a14fc2ce416d14e56c5bf265e8b646487f
|
7d3b096f803d1a47ad71a5c8aab30ba3aa67828c
|
/chibi_file/__init__.py
|
fe22184683cfdc5c75ca908282fad7a086a9d2bc
|
[] |
no_license
|
dem4ply/chibi_file
|
462244dac712d88915f2b931c5f0822f6d1fa937
|
d27cef794512014b1602486edd0235052b38087a
|
refs/heads/master
| 2020-12-03T05:09:15.825690 | 2017-08-23T09:36:57 | 2017-08-23T09:36:57 | 95,737,905 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,835 |
py
|
import mmap
import os
import shutil
def current_dir():
    """Return the process's current working directory."""
    return os.getcwd()
def inflate_dir( src ):
    """Expand '~' when present; otherwise absolutize the path."""
    expand = os.path.expanduser if '~' in src else os.path.abspath
    return expand(src)
def is_dir( src ):
    """True when *src* names an existing directory."""
    return os.path.isdir(src)
def is_file( src ):
    """True when *src* names an existing regular file."""
    return os.path.isfile(src)
def ls( src=None ):
    """Yield entry names found in *src*, defaulting to the current directory."""
    target = current_dir() if src is None else src
    return (entry for entry in os.listdir(target))
def ls_only_dir( src=None ):
    """Yield only the directory entries inside *src* (default: cwd).

    Bug fix: the directory test previously ran against bare entry names,
    i.e. relative to the process cwd — wrong whenever *src* was a different
    directory.  Entries are now checked relative to *src* itself.
    """
    base = current_dir() if src is None else src
    return ( name for name in ls( base ) if is_dir( join( base, name ) ) )
def join( *patch ):
    """Join path components using the platform separator."""
    return os.path.join(*patch)
def exists( file_name ):
    """True when *file_name* exists on disk (file or directory)."""
    return os.path.exists(file_name)
def copy( source, dest ):
    # Thin wrapper over shutil.copy (copies file data and permission bits).
    shutil.copy( source, dest )
class Chibi_file:
    """A file handle backed by a read-only mmap for fast substring search.

    Creating the object touches the file into existence if needed, then
    maps it.  NOTE(review): mmap of a zero-length file raises ValueError,
    so constructing this on a brand-new (empty) file appears to fail —
    verify.  mmap.PROT_READ is POSIX-only; this class is not portable to
    Windows as written.
    """
    def __init__( self, file_name ):
        self._file_name = file_name
        if not self.exists:
            self.touch()
        self.reread()
    @property
    def file_name( self ):
        # Path this instance wraps (read-only attribute).
        return self._file_name
    def __del__( self ):
        self._file_content.close()
    def find( self, string_to_find ):
        # mmap.find needs bytes; transparently encode str arguments.
        if isinstance( string_to_find, str ):
            string_to_find = string_to_find.encode()
        return self._file_content.find( string_to_find )
    def reread( self ):
        """Re-map the file contents (e.g. after an append).

        NOTE(review): the previous mmap is not closed before being
        replaced; it is only released when garbage-collected.
        """
        with open( self._file_name, 'r' ) as f:
            self._file_content = mmap.mmap( f.fileno(), 0,
                                            prot=mmap.PROT_READ )
    def __contains__( self, string ):
        # Enables `substring in chibi_file` via the mmap search.
        return self.find( string ) >= 0
    def append( self, string ):
        # Append to the underlying file, then refresh the mapping.
        with open( self._file_name, 'a' ) as f:
            f.write( string )
        self.reread()
    @property
    def exists( self ):
        return exists( self.file_name )
    def touch( self ):
        # Create the file (empty) if it does not already exist.
        open( self.file_name, 'a' ).close()
    def copy( self, dest ):
        copy( self.file_name, dest )
|
[
"[email protected]"
] | |
b7e89b7513c6151d39dc8adad4fee33e8afcf8f1
|
09cc8367edb92c2f02a0cc1c95a8290ff0f52646
|
/ipypublish_plugins/example_new_plugin.py
|
2fe177802ec9fd3259ca9ac9ac002ef160f3c1f2
|
[
"BSD-3-Clause"
] |
permissive
|
annefou/ipypublish
|
7e80153316ab572a348afe26d309c2a9ee0fb52b
|
917c7f2e84be006605de1cf8851ec13d1a163b24
|
refs/heads/master
| 2020-04-13T16:08:59.845707 | 2018-07-30T18:26:12 | 2018-07-30T18:26:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,612 |
py
|
"""html in standard nbconvert format, but with
- a table of contents
- toggle buttons for showing/hiding code & output cells
- converts or removes (if no converter) latex tags (like \cite{abc}, \ref{})
"""
from ipypublish.filters.replace_string import replace_string
from ipypublish.html.create_tpl import create_tpl
from ipypublish.html.ipypublish import latex_doc
# from ipypublish.html.standard import inout_prompt
from ipypublish.html.ipypublish import toc_sidebar
from ipypublish.html.ipypublish import toggle_buttons
from ipypublish.html.standard import content
from ipypublish.html.standard import content_tagging
from ipypublish.html.standard import document
from ipypublish.html.standard import mathjax
from ipypublish.html.standard import widgets
from ipypublish.preprocessors.latex_doc_captions import LatexCaptions
from ipypublish.preprocessors.latex_doc_html import LatexDocHTML
from ipypublish.preprocessors.latex_doc_links import LatexDocLinks
from ipypublish.preprocessors.latextags_to_html import LatexTagsToHTML
from ipypublish.preprocessors.split_outputs import SplitOutputs
oformat = 'HTML'
# Exporter wiring: register the replace_string filter and the preprocessor
# chain that splits outputs, resolves links, and converts latex tags.
config = {'TemplateExporter.filters': {'replace_string': replace_string},
          'Exporter.filters': {'replace_string': replace_string},
          'Exporter.preprocessors': [SplitOutputs, LatexDocLinks, LatexDocHTML, LatexTagsToHTML, LatexCaptions]}
# Assemble the HTML template from standard + ipypublish fragments
# (prompts intentionally disabled; toggle buttons + sidebar TOC enabled).
template = create_tpl([
    document.tpl_dict,
    content.tpl_dict, content_tagging.tpl_dict,
    mathjax.tpl_dict, widgets.tpl_dict,
    # inout_prompt.tpl_dict,
    toggle_buttons.tpl_dict, toc_sidebar.tpl_dict,
    latex_doc.tpl_dict
])
|
[
"[email protected]"
] | |
6846461a15b491de3c42e18d6aa4d646d87bad7a
|
4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446
|
/Python基础笔记/13/代码/3.多继承.py
|
1693fc8f7b66401a95f44f287cfcb7d4c149f841
|
[] |
no_license
|
zhenguo96/test1
|
fe21510aea7feb674e52fd7a86d4177666f841c5
|
0d8de7e73e7e635d26462a0bc53c773d999498be
|
refs/heads/master
| 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 623 |
py
|
# # 多继承
# class Base1:
# def __init__(self,name):
# self.name = name
# def t1(self):
# print("Base1")
#
# class Base2:
# def __init__(self,name):
# self.name = name
# def t2(self):
# print("Base2")
#
# class Base3:
# def __init__(self, name):
# self.name = name
# def t3(self):
# print("Base3")
#
# # 多继承的子类
# class Child(Base1,Base2,Base3):
# pass
# child = Child('tom')
# print(child.__dict__)
# child.t1()
# child.t2()
# # 继承顺序
# print(Child.mro())
# print(Child.__mro__)
#
|
[
"[email protected]"
] | |
c02698bcbb5677d5aa1cdf687d66869a34eea59c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02742/s024664971.py
|
37251941a04a71608f69d756b2f8eb6bf24e8a52
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
py
|
H, W = [int(_) for _ in input().split()]
# The arithmetic below evaluates to ceil(H*W / 2) for H, W >= 2 (checker-
# board count), with the special case of a single row/column giving 1.
if H == 1 or W == 1:
    ans = 1
else:
    # Full 2x2 blocks contribute 2 squares each ...
    ans = (H // 2) * (W // 2) * 2
    # ... plus the leftover odd row/column strips and the odd corner.
    if H % 2 == 1:
        ans += W // 2
    if W % 2 == 1:
        ans += H // 2
    if H % 2 == 1 and W % 2 == 1:
        ans += 1
print(ans)
|
[
"[email protected]"
] | |
d6e8faee78b555a964bcdabf9d7b434fba09a3c0
|
b96f1bad8a74d31d8ff79bc955813bfcd17d7b26
|
/24. Swap Nodes in Pairs.py
|
75e6d9a0451fd14aadd62f665ddbd922cfa44910
|
[] |
no_license
|
brianhu0716/LeetCode-Solution
|
e7177af15e84e833ce8ab05027683ed4ac489643
|
158a4359c90b723545b22c4898047274cc1b80a6
|
refs/heads/main
| 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 15:06:14 2021
@author: Brian
"""
'''
交換相鄰兩node的值即可
'''
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes' values and return the head.

        O(n) time, O(1) space: exchanges values pairwise instead of
        relinking nodes, so the node structure is untouched.
        """
        # Bug fix: the guard was written as `while`, which only works by
        # accident because it returns immediately; `if` states the intent.
        if not head or not head.next:
            return head
        ptr = head
        while ptr and ptr.next:
            # Tuple assignment swaps the pair's values in place.
            ptr.val, ptr.next.val = ptr.next.val, ptr.val
            ptr = ptr.next.next
        return head
|
[
"[email protected]"
] | |
7374ce7e683ccf1d4913b6f64fb04fb50b016df7
|
6c686d118e6d3072b3694c02c684a6619d4dd03e
|
/rsdns/tests/test_client.py
|
cb34bcfaef1aa74df689f00debfbff8959f697df
|
[
"Apache-2.0"
] |
permissive
|
masthalter/reddwarf
|
02e7b78e1e61178647fe8d98ab53eadfabe66e7f
|
72cf41d573cd7c35a222d9b7a8bfaad937f17754
|
HEAD
| 2016-11-08T16:12:16.783829 | 2012-04-26T22:26:56 | 2012-04-26T22:26:56 | 2,387,563 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,033 |
py
|
import httplib2
import mox
import unittest
from eventlet import pools
from novaclient.client import HTTPClient
from novaclient import exceptions
from rsdns.client.dns_client import DNSaasClient
ACCOUNT_ID = 1155
USERNAME = "test_user"
API_KEY="key"
AUTH_URL="urly"
MANAGEMENT_BASE_URL="mgmter"
class FakeResponse(object):
    """Minimal stand-in for an httplib2 response exposing only ``status``."""
    def __init__(self, status):
        self.status = status
class WhenDNSaasClientConnectsSuccessfully(unittest.TestCase):
    """Unit tests for DNSaasClient.request token handling, using mox mocks
    and a fake httplib2 transport injected through the client's http_pool.
    """
    def setUp(self):
        self.mox = mox.Mox()
    def tearDown(self):
        self.mox.VerifyAll()
    def fake_auth(self, *args, **kwargs):
        self.auth_called = True
    def create_mock_client(self, fake_request_method):
        """
        Creates a mocked DNSaasClient object, which calls "fake_request_method"
        instead of httplib2.request.
        """
        class FakeHttpLib2(object):
            pass
        FakeHttpLib2.request = fake_request_method
        mock_client = self.mox.CreateMock(DNSaasClient)
        mock_client.http_pool = pools.Pool()
        mock_client.http_pool.create = FakeHttpLib2
        mock_client.auth_token = 'token'
        return mock_client
    def test_make_request(self):
        # Happy path: a 200 response is returned with its JSON body parsed.
        kwargs = {
            'headers': {},
            'body': "{}"
        }
        def fake_request(self, *args, **kwargs):
            return FakeResponse(200), '{"hi":"hello"}'
        mock_client = self.create_mock_client(fake_request)
        resp, body = DNSaasClient.request(mock_client, **kwargs)
        self.assertEqual(200, resp.status)
        self.assertEqual({"hi":"hello"}, body)
    def test_make_request_with_old_token(self):
        # A transport that always returns 401 must exhaust the re-auth
        # retries (three authenticate() calls expected) and then raise.
        kwargs = {
            'headers': {},
            'body': '{"message":"Invalid authentication token. Please renew."}'
        }
        def fake_request(self, *args, **kwargs):
            return FakeResponse(401), \
                   '{"message":"Invalid authentication token. Please renew."}'
        mock_client = self.create_mock_client(fake_request)
        mock_client.authenticate()
        mock_client.authenticate()
        mock_client.authenticate()
        self.mox.ReplayAll()
        self.assertRaises(exceptions.Unauthorized, DNSaasClient.request,
                          mock_client, **kwargs)
    def test_make_request_with_old_token_2(self):
        # First call 401s, second succeeds: exactly one re-auth expected.
        kwargs = {
            'headers': {},
            'body': "{}"
        }
        self.count = 0
        def fake_request(_self, *args, **kwargs):
            self.count += 1
            if self.count > 1:
                return FakeResponse(200), '{"hi":"hello"}'
            else:
                return FakeResponse(401), \
                       '{"message":"Invalid authentication token. ' \
                       'Please renew."}'
        mock_client = self.create_mock_client(fake_request)
        mock_client.authenticate()
        self.mox.ReplayAll()
        resp, body = DNSaasClient.request(mock_client, **kwargs)
        self.assertEqual(200, resp.status)
        self.assertEqual({"hi":"hello"}, body)
|
[
"[email protected]"
] | |
3c3d8847ece82de5f4ddb2fa122ea976e7da211e
|
2ee3a2b8971118b1a1e8c101382702d698021ad5
|
/weather/models.py
|
8372f28956277f086c9e5f53ff17faa6a968168c
|
[] |
no_license
|
manikshahkataria/weather
|
29a34264fd281cf26758be06d19dd19bbd226cfc
|
1bb5160caab2dc287118ab7ed4a25cf575453ee4
|
refs/heads/master
| 2022-12-11T07:50:28.988645 | 2019-01-19T10:22:10 | 2019-01-19T10:22:10 | 163,946,933 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
from django.db import models
class City(models.Model):
    # A city whose weather is looked up; only a display name is stored.
    name= models.CharField(max_length=25)
    def __str__(self):
        return self.name
    class Meta:
        # Admin plural: "cities", not the auto-generated "citys".
        verbose_name_plural='cities'
|
[
"[email protected]"
] | |
5fc93f5180bbbf9d6e8482073bcb89bf2d923892
|
2c68f9156087d6d338373f9737fee1a014e4546b
|
/src/connectedk8s/azext_connectedk8s/vendored_sdks/models/authentication_details_value.py
|
982b4554803e85c978165d7b651f09cd77ff0c69
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
anpaz/azure-cli-extensions
|
8b0d4071c49840da9883f13cb0fd1f4515246ee0
|
847fd487fe61e83f2a4163a9393edc9555267bc2
|
refs/heads/master
| 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 |
MIT
| 2021-01-28T10:31:07 | 2020-04-20T20:19:43 |
Python
|
UTF-8
|
Python
| false | false | 890 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AuthenticationDetailsValue(Model):
    """Authentication token value.

    Auto-generated msrest model (see header); regenerate rather than edit.

    :param token: Authentication token.
    :type token: str
    """
    # msrest serialization map: wire key and type for each attribute.
    _attribute_map = {
        'token': {'key': 'token', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(AuthenticationDetailsValue, self).__init__(**kwargs)
        self.token = kwargs.get('token', None)
|
[
"[email protected]"
] | |
ab881c94078041feb7fe0fefd3fb0913233feb4b
|
e715be7aef31a307d2cf09d8a4ecf46ea662826f
|
/device_simulator/src/orchestator.py
|
e88831369f171c3e6acd4859ce8da628125314b0
|
[] |
no_license
|
GabrielMartinMoran/TFI_UNTREF
|
0dcfd0d5b4d69c282ce732a21039c4a69a6530af
|
e4abc9bc93b840627a008e3af5f4d86b7cd30732
|
refs/heads/main
| 2023-06-23T11:06:35.138785 | 2021-07-14T13:21:14 | 2021-07-14T13:21:14 | 358,573,316 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,489 |
py
|
import time
from datetime import datetime
from src.models.energy_sensor import EnergySensor
from src.models.console_display import ConsoleDisplay
from src.models.measure import Measure
from src.repositories.user_repository import UserRepository
from src.repositories.device_repository import DeviceRepository
import config
class Orchestator:
    """Device-simulator main loop: samples the energy sensor, uploads each
    measure to the server, and refreshes the console UI.
    """
    def __init__(self, device, ref_voltage, ref_current, user_secret):
        self.sensor = EnergySensor(ref_voltage, ref_current)
        self.display = ConsoleDisplay()
        self.device = device
        self.user_repository = UserRepository(user_secret)
        self.device_repository = DeviceRepository(user_secret)
        # Fetched once at startup; not refreshed during the loop.
        self.user = self.user_repository.get_user_data()
        self.message = ''
    def loop(self):
        """Run forever: measure, upload (best-effort), render, sleep."""
        while(True):
            measure = Measure(
                self.sensor.get_voltage(),
                self.sensor.get_current(),
                self.__get_timestamp()
            )
            try:
                self.device_repository.add_measure(self.device.ble_id, measure)
                self.message = 'Muestra enviada al servidor'
            except Exception as e:
                # Upload failures are shown in the UI but never abort the loop.
                self.message = f'Error: {e}'
            self.device.set_last_measure(measure)
            self.display.set_ui(self.device, self.user, self.message)
            self.display.draw()
            time.sleep(config.TIME_BETWEEN_MEASUREMENTS)
    def __get_timestamp(self):
        # Whole-second local-time Unix timestamp for the current measure.
        return int(datetime.now().timestamp())
|
[
"[email protected]"
] | |
5b6ae4546dda852369334665c79612273e580227
|
0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02
|
/past3/e.py
|
6a5af7eb3744903d23f04c0de4ad30e373c33a27
|
[] |
no_license
|
silphire/atcoder
|
b7b02798a87048757745d99e8564397d1ca20169
|
f214ef92f13bc5d6b290746d5a94e2faad20d8b0
|
refs/heads/master
| 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
n, m, q = map(int, input().split())
# Adjacency sets of an undirected graph on n vertices (0-based internally).
e = [set() for _ in range(n)]
for i in range(m):
    u, v = map(int, input().split())
    u -= 1
    v -= 1
    e[u].add(v)
    e[v].add(u)
# c[v]: current colour of vertex v.
c = list(map(int, input().split()))
for i in range(q):
    s = tuple(map(int, input().split()))
    vv = s[1] - 1
    # The queried vertex's colour is printed BEFORE the update is applied.
    print(c[vv])
    if s[0] == 1:
        # Type 1: repaint every neighbour with vv's current colour.
        for ee in e[vv]:
            c[ee] = c[vv]
    else:
        # Type 2: repaint vv itself with the given colour s[2].
        c[vv] = s[2]
|
[
"[email protected]"
] | |
1ee56e00fc1f6518207dde8d7e2c4ad70939ccb7
|
62b90959763f40954a7c6270bfb0529b536b2888
|
/user/forms.py
|
e3f2e1e1a1c2c677d176cbff33084fa0620bcb3a
|
[
"MIT"
] |
permissive
|
thiagosouzalink/blogphoto_Django
|
68698c4fc684f0ba1d9dde795a07f72df32ead38
|
7d09f44b196897c4d31fff2eff8d2a164e44db27
|
refs/heads/master
| 2023-02-20T20:32:00.527084 | 2021-01-25T15:16:07 | 2021-01-25T15:16:07 | 332,782,817 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,226 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class UserForm(forms.ModelForm):
    """Registration form for CustomUser (labels/messages in Portuguese).

    NOTE(review): ``save`` bypasses ModelForm.save and calls
    ``create_user`` directly with the raw cleaned data — verify that
    password hashing happens inside the manager's ``create_user``.
    """
    username = forms.CharField(
        label='Usuário',
        error_messages= {
            'invalid': 'Nome de usuário inválido, informe apenas letras, números ou @, ., +, -, _',
            'max_length': 'Você excedeu o limite de caracteres.',
            'unique': 'Nome de usuário já existe.'
        },
        help_text= "Requeridos 150 caracteres ou menos. Letras, dígitos e @ /. / + / - / _ apenas",
        widget=forms.TextInput(attrs={'placeholder':'Username'})
    )
    email = forms.EmailField(
        label='E-mail',
        error_messages={'invalid': 'E-mail inválido.'},
        help_text='[email protected]',
        widget=forms.TextInput(attrs={'placeholder':'E-mail'})
    )
    first_name = forms.CharField(
        label='Nome',
        error_messages={'max_length': 'Nome não pode ter mais de 30 caracteres'},
        widget=forms.TextInput(attrs={'placeholder':'Nome'})
    )
    last_name = forms.CharField(
        label='Sobrenome',
        error_messages={'max_length': 'Sobrenome não pode ter mais de 150 caracteres'},
        widget=forms.TextInput(attrs={'placeholder':'Sobrenome'})
    )
    telefone = forms.CharField(
        label='Telefone',
        help_text='(xx) xxxxx-xxxx',
        widget=forms.TextInput(attrs={'placeholder':'Telefone...'})
    )
    password = forms.CharField(
        label='Senha',
        help_text="Digite uma senha segura",
        widget=forms.PasswordInput(attrs={'placeholder':'Senha'})
    )
    password2 = forms.CharField(
        label='Confirmar senha',
        widget=forms.PasswordInput(attrs={'placeholder':'Repetir senha'})
    )
    class Meta:
        model = CustomUser
        fields = (
            'username',
            'email',
            'first_name',
            'last_name',
            'telefone'
        )
    def clean_password2(self):
        # Validate that both password fields match before accepting the form.
        passwords = self.cleaned_data
        if passwords['password2'] != passwords['password']:
            raise forms.ValidationError("Senhas diferentes")
        return passwords['password2']
    def save(self, commit=True):
        # Delegate creation to the custom manager (ignores ``commit``).
        user = CustomUser.objects.create_user(
            username=self.cleaned_data['username'],
            email=self.cleaned_data['email'],
            password=self.cleaned_data['password'],
            first_name=self.cleaned_data['first_name'],
            last_name=self.cleaned_data['last_name'],
            telefone=self.cleaned_data['telefone']
        )
        return user
class UserProfileForm(forms.ModelForm):
    """Form for updating an existing user's profile data
    (original docstring: "Formulário para atualizar dados do usuário").
    """
    facebook = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.facebook.com/seu_username'}), required=False)
    instagram = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.instagram.com/seu_username'}), required=False)
    twitter = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.twitter.com/seu_username'}), required=False)
    class Meta:
        model = CustomUser
        # Fix: removed the accidental duplicate assignment
        # (`fields = fields = (...)`) — harmless at runtime but a typo.
        fields = (
            'username',
            'email',
            'first_name',
            'last_name',
            'telefone',
            'facebook',
            'instagram',
            'twitter',
            'bio'
        )
class CustomUserCreateForm(UserCreationForm):
    """Admin-site form for creating a new user account."""

    class Meta:
        model = CustomUser
        fields = ('first_name', 'last_name', 'telefone')
        labels = {'username': 'Username/E-mail'}

    def save(self, commit=True):
        """Build the user, hash the password, and persist when commit=True."""
        new_user = super().save(commit=False)
        new_user.set_password(self.cleaned_data["password1"])
        new_user.username = self.cleaned_data["username"]
        if commit:
            new_user.save()
        return new_user
class CustomUserChangeForm(UserChangeForm):
    """Admin-site form for updating an existing user's basic fields."""
    class Meta:
        model = CustomUser
        fields = ('email', 'first_name', 'last_name', 'telefone')
|
[
"[email protected]"
] | |
9378b601770bd4c71b6a616ad9a7a895ad48a7b2
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5658571765186560_1/Python/StefanPochmann/D.py
|
feca86f2ed13ba681c7c3230c60ce03f3e2c21f7
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
# Code Jam "Ominous Omino": decide whether Gabriel (placer) or Richard
# (chooser) wins for an X-omino on an R x C board.
# Uncomment to read from a local file instead of stdin:
#f = open('D.in')
#def input():
#    return next(f)


def _gabriel_wins(X, area, short_side):
    """Return True when Gabriel can always tile the R*C board with X-ominoes."""
    if area % X != 0:
        return False
    if X in (1, 2):
        return True
    if X == 3:
        return short_side >= 2
    if X == 4:
        return short_side >= 3
    if X == 5:
        return short_side >= 3 and area > 15
    if X == 6:
        return short_side >= 4
    # X >= 7: some X-omino always blocks a perfect tiling.
    return False


T = int(input())
for case_no in range(1, T + 1):
    X, R, C = map(int, input().split())
    winner = 'GABRIEL' if _gabriel_wins(X, R * C, min(R, C)) else 'RICHARD'
    print('Case #{}: {}'.format(case_no, winner))
|
[
"[email protected]"
] | |
37dafa17ed9dc319a258358248dd28b2bbf33390
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/old.py
|
b834b8f7e61106d74a5d8d14bccffde5155b3848
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint grouping the legacy "/old" routes; the lookup view below is
# intentionally left disabled (commented out) pending TODO 2.
controller = Blueprint('old', __name__, url_prefix='/old')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
|
[
"[email protected]"
] | |
21d962029b74b4eafe0c5b512082596bdf3800f2
|
95e7cf518b8d71270a7de6e7c7254861010f5035
|
/garage/tf/algos/batch_polopt.py
|
4245380d53581a9a7d6e72637049760557283eaf
|
[
"MIT"
] |
permissive
|
reslthrowaway/garage
|
aaeadf7e918d80d467b2fcce61c50e8404480f83
|
e921119434d205b6f644f139f6075516fb9ece74
|
refs/heads/master
| 2020-03-28T08:32:58.835060 | 2018-09-08T21:55:41 | 2018-09-08T21:55:41 | 147,972,769 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,354 |
py
|
import time
import tensorflow as tf
from garage.algos import RLAlgorithm
import garage.misc.logger as logger
from garage.tf.plotter import Plotter
from garage.tf.samplers import BatchSampler
from garage.tf.samplers import VectorizedSampler
class BatchPolopt(RLAlgorithm):
    """
    Base class for batch sampling-based policy optimization methods.
    This includes various policy gradient methods like vpg, npg, ppo, trpo,
    etc.
    """

    def __init__(self,
                 env,
                 policy,
                 baseline,
                 scope=None,
                 n_itr=500,
                 start_itr=0,
                 batch_size=5000,
                 max_path_length=500,
                 discount=0.99,
                 gae_lambda=1,
                 plot=False,
                 pause_for_plot=False,
                 center_adv=True,
                 positive_adv=False,
                 store_paths=False,
                 whole_paths=True,
                 fixed_horizon=False,
                 sampler_cls=None,
                 sampler_args=None,
                 force_batch_sampler=False,
                 **kwargs):
        """
        :param env: Environment
        :param policy: Policy
        :type policy: Policy
        :param baseline: Baseline
        :param scope: Scope for identifying the algorithm. Must be specified
            if running multiple algorithms simultaneously, each using
            different environments and policies
        :param n_itr: Number of iterations.
        :param start_itr: Starting iteration.
        :param batch_size: Number of samples per iteration.
        :param max_path_length: Maximum length of a single rollout.
        :param discount: Discount.
        :param gae_lambda: Lambda used for generalized advantage estimation.
        :param plot: Plot evaluation run after each iteration.
        :param pause_for_plot: Whether to pause before continuing when
            plotting.
        :param center_adv: Whether to rescale the advantages so that they have
            mean 0 and standard deviation 1.
        :param positive_adv: Whether to shift the advantages so that they are
            always positive. When used in conjunction with center_adv the
            advantages will be standardized before shifting.
        :param store_paths: Whether to save all paths data to the snapshot.
        :return:
        """
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        # Pick a sampler implementation: use the vectorized sampler when the
        # policy supports it, unless the caller forces the plain batch one.
        if sampler_cls is None:
            if self.policy.vectorized and not force_batch_sampler:
                sampler_cls = VectorizedSampler
            else:
                sampler_cls = BatchSampler
        if sampler_args is None:
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
        # Subclasses build their optimization graph here.
        self.init_opt()

    def start_worker(self, sess):
        # Spin up sampler workers and, optionally, the live plotter.
        self.sampler.start_worker()
        if self.plot:
            self.plotter = Plotter(self.env, self.policy, sess)
            self.plotter.start()

    def shutdown_worker(self):
        self.sampler.shutdown_worker()
        if self.plot:
            self.plotter.shutdown()

    def obtain_samples(self, itr):
        # Collect raw rollouts for this iteration.
        return self.sampler.obtain_samples(itr)

    def process_samples(self, itr, paths):
        # Turn raw rollouts into training-ready arrays (advantages etc.).
        return self.sampler.process_samples(itr, paths)

    def train(self, sess=None):
        """Run the full sample/optimize loop; returns the last average return.

        A TF session is created (and later closed) only when the caller did
        not supply one.
        """
        created_session = True if (sess is None) else False
        if sess is None:
            sess = tf.Session()
            sess.__enter__()

        sess.run(tf.global_variables_initializer())
        self.start_worker(sess)
        start_time = time.time()
        last_average_return = None
        for itr in range(self.start_itr, self.n_itr):
            itr_start_time = time.time()
            with logger.prefix('itr #%d | ' % itr):
                logger.log("Obtaining samples...")
                paths = self.obtain_samples(itr)
                logger.log("Processing samples...")
                samples_data = self.process_samples(itr, paths)
                last_average_return = samples_data["average_return"]
                logger.log("Logging diagnostics...")
                self.log_diagnostics(paths)
                logger.log("Optimizing policy...")
                self.optimize_policy(itr, samples_data)
                logger.log("Saving snapshot...")
                params = self.get_itr_snapshot(itr, samples_data)
                if self.store_paths:
                    params["paths"] = samples_data["paths"]
                logger.save_itr_params(itr, params)
                logger.log("Saved")
                logger.record_tabular('Time', time.time() - start_time)
                logger.record_tabular('ItrTime', time.time() - itr_start_time)
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    self.plotter.update_plot(self.policy, self.max_path_length)
                    if self.pause_for_plot:
                        input("Plotting evaluation run: Press Enter to "
                              "continue...")

        self.shutdown_worker()
        if created_session:
            sess.close()
        return last_average_return

    def log_diagnostics(self, paths):
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)

    def init_opt(self):
        """
        Initialize the optimization procedure. If using tensorflow, this may
        include declaring all the variables and compiling functions
        """
        raise NotImplementedError

    def get_itr_snapshot(self, itr, samples_data):
        """
        Returns all the data that should be saved in the snapshot for this
        iteration.
        """
        raise NotImplementedError

    def optimize_policy(self, itr, samples_data):
        raise NotImplementedError
|
[
"[email protected]"
] | |
64f1c7bd8f0f8bab932d8e95efb828f317b84145
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/438806_catenateFilesFactory/recipe-438806.py
|
59a8c77281e6b148e80c4e006fc5987455451ecf
|
[
"Python-2.0",
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 |
MIT
| 2021-02-24T15:40:00 | 2021-02-24T11:31:15 |
Python
|
UTF-8
|
Python
| false | false | 2,449 |
py
|
import os
def catenateFilesFactory(isTextFiles=True, isClearTgt=True, isCreateTgt=True):
    """Return a catenateFiles function parameterized by the factory arguments.

    isTextFiles: Catenate text files. If the last line of a non-empty file
                 is not terminated by an EOL, append an EOL to it.
    isClearTgt:  If the target file already exists, clear its original
                 contents before appending the source files.
    isCreateTgt: If the target file does not already exist, and this
                 parameter is True, create the target file; otherwise raise
                 an IOError.
    """
    # All file handling is in binary mode, so the platform EOL must be bytes
    # (the original compared str to bytes under Python 3).
    eol = os.linesep.encode()
    lenEol = len(eol)

    def _needs_eol(data):
        # True when non-empty ``data`` does not already end with the EOL.
        return bool(data) and (len(data) < lenEol or data[-lenEol:] != eol)

    def catenateFiles(tgtFile, *srcFiles):
        isTgtAppendEol = False
        if os.path.isfile(tgtFile):
            if isClearTgt:
                # Truncate the existing target before appending.
                with open(tgtFile, 'wb'):
                    pass
            elif isTextFiles:
                # Appending to surviving text: remember whether the existing
                # tail needs a terminating EOL first.
                with open(tgtFile, 'rb') as tgt:
                    isTgtAppendEol = _needs_eol(tgt.read())
        elif not isCreateTgt:
            # Python-3 raise syntax (the original used the Python-2 form
            # ``raise IOError, "..."``).
            raise IOError(
                "catenateFiles target file '%s' not found" % (tgtFile))
        with open(tgtFile, 'ab') as tgt:
            if isTgtAppendEol:
                tgt.write(eol)
            for srcFile in srcFiles:
                with open(srcFile, 'rb') as src:
                    data = src.read()
                tgt.write(data)
                if isTextFiles and _needs_eol(data):
                    tgt.write(eol)
        return

    # Support reflection and doc string.
    catenateFiles.isTextFiles = isTextFiles
    catenateFiles.isClearTgt = isClearTgt
    catenateFiles.isCreateTgt = isCreateTgt
    if isTextFiles:
        docFileType = "text"
    else:
        docFileType = "binary"
    if isCreateTgt:
        docCreate = "Create tgtFile if it does not already exist."
    else:
        docCreate = "Require that tgtFile already exists."
    if isClearTgt:
        docClear = "replace"
    else:
        docClear = "append to"
    catenateFiles.__doc__ = """Catenate %s srcFiles to %s the tgtFile.
    %s
    All of the srcFiles must exist; otherwise raise an IOError.
    """ % (docFileType, docClear, docCreate)
    return catenateFiles
|
[
"[email protected]"
] | |
8402be75cce1ddbd62ff54e6ca1af746d547ba7e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04044/s043863854.py
|
77675a21e84977960d992326c6742602cc68d034
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
# Read N words (each of length L, which is otherwise unused), sort them
# lexicographically, and print their concatenation.
N, L = map(int, input().split())
words = [input() for _ in range(N)]
words.sort()
print(''.join(words))
|
[
"[email protected]"
] | |
5f2b378d006e7aa2e46251661e0d4e03d3b9810f
|
d452e34253561a47b974e260dabd8dcda6e750a2
|
/supervised_learning/0x0B-face_verification/5-main.py
|
0859b3e7ecf4dc518afbab30ba555f77a521f265
|
[] |
no_license
|
JohnCook17/holbertonschool-machine_learning
|
57fcb5b9d351826c3e3d5478b3b4fbe16cdfac9f
|
4200798bdbbe828db94e5585b62a595e3a96c3e6
|
refs/heads/master
| 2021-07-07T10:16:21.583107 | 2021-04-11T20:38:33 | 2021-04-11T20:38:33 | 255,424,823 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
#!/usr/bin/env python3
from align import FaceAlign
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
# Demo: align one face image to three anchor landmarks and display it.
fa = FaceAlign('models/landmarks.dat')
test_img = mpimg.imread('HBTN/KirenSrinivasan.jpg')
# Normalized target positions (presumably left eye, right eye, nose tip —
# TODO confirm against the FaceAlign model's landmark indexing).
anchors = np.array([[0.194157, 0.16926692], [0.7888591, 0.15817115], [0.4949509, 0.5144414]], dtype=np.float32)
# Landmarks 36, 45, 33 are mapped onto the anchors in a 96x96 output.
aligned = fa.align(test_img, np.array([36, 45, 33]), anchors, 96)
plt.imshow(aligned)
ax = plt.gca()
# Anchors are normalized coordinates; scale by the 96-pixel output size.
for anchor in anchors:
    ax.add_patch(Circle(anchor * 96, 1))
plt.show()
|
[
"[email protected]"
] | |
7508ed13cb989f8e06150d4a366684e8cb626f4c
|
890c8b8e90e516a5a3880eca9b2d217662fe7d84
|
/armulator/armv6/opcodes/abstract_opcodes/usad8.py
|
6568222b03d6464463dd16b171bf86a89484d155
|
[
"MIT"
] |
permissive
|
doronz88/armulator
|
b864135996f876c7857b79a314d4aa06cc19c549
|
0294feac2785c8947e5943ac0c34f941ee4b5fff
|
refs/heads/master
| 2022-11-05T08:14:42.405335 | 2020-06-18T23:53:17 | 2020-06-18T23:53:17 | 273,363,061 | 2 | 0 | null | 2020-06-18T23:51:03 | 2020-06-18T23:51:02 | null |
UTF-8
|
Python
| false | false | 1,008 |
py
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from bitstring import BitArray
class Usad8(AbstractOpcode):
    """USAD8: unsigned sum of absolute differences over the four byte lanes
    of registers Rn and Rm, written to Rd."""

    def __init__(self, m, d, n):
        super(Usad8, self).__init__()
        self.m = m
        self.d = d
        self.n = n

    def execute(self, processor):
        if processor.condition_passed():
            total = 0
            # Byte lanes as bit slices of the 32-bit registers, MSB first.
            for lo, hi in ((24, 32), (16, 24), (8, 16), (0, 8)):
                total += abs(processor.registers.get(self.n)[lo:hi].uint -
                             processor.registers.get(self.m)[lo:hi].uint)
            processor.registers.set(self.d, BitArray(uint=total, length=32))
|
[
"[email protected]"
] | |
1a27b7cb18413d5522bf3d1a3fb9298b4be330c4
|
6810a482759afd585db7bb0b85fd0416f0450e6d
|
/Open Kattis/sibice.py
|
f2d6c7c2fb3679fd80f435ba5541e145a8be4611
|
[] |
no_license
|
BenRStutzman/kattis
|
01b000ac2353c8b8000c6bddec3698f66b0198ef
|
005720f853e7f531a264227d0d9aaa19d4d7cf1b
|
refs/heads/master
| 2020-07-15T23:52:45.785021 | 2019-11-09T03:28:06 | 2019-11-09T03:28:06 | 205,675,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 292 |
py
|
import sys
import math

# Kattis "sibice": a match fits in the box iff its length is at most the
# box diagonal, i.e. length^2 <= l^2 + w^2 (integer arithmetic, no sqrt).
num_matches, box_length, box_width = (
    int(tok) for tok in sys.stdin.readline().split())
diag_sq = box_length ** 2 + box_width ** 2
for _ in range(num_matches):
    match_length = int(sys.stdin.readline())
    print("DA" if match_length ** 2 <= diag_sq else "NE")
|
[
"[email protected]"
] | |
a27806e252e67dc407b440d4781e0d23bf86fc34
|
f3827ae39e077daf5507959a13d1ac4a782fe084
|
/src/accounts/urls.py
|
c29e329060e8daed823e775738b48945589f62da
|
[] |
no_license
|
achiengcindy/ExtendingDjangoAuth
|
c6bc2c5360d90378d7d96efb3132506ad10349d9
|
19214ef7ef9ccdcc66e4ec15fa9e22e5fd5e24f3
|
refs/heads/master
| 2020-03-26T20:09:34.687774 | 2018-08-28T19:32:46 | 2018-08-28T19:32:46 | 145,308,095 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 416 |
py
|
from django.urls import path
from .views import register, home, edit, activate, account_activation_sent
urlpatterns = [
    path('', home, name='home'),
    path('register/', register, name='register'),
    path('edit/', edit, name='edit'),
    # Fix: the original route string was 'activate/<slug:uidb64>/<slug:token>)/'
    # — the stray ')' after the token converter became a literal character in
    # the URL, which activation links built by hand would not match.
    path('activate/<slug:uidb64>/<slug:token>/', activate, name='activate'),
    path('account_activation_sent/', account_activation_sent, name='account_activation_sent')
]
|
[
"[email protected]"
] | |
9d1544e03e6517060106ba3d8555c94351c4e3c9
|
b5fb45288ed2a204692051ab78e72d8aa6e5accd
|
/argo_data_scripts/util/count_concurrent.py
|
d13a0c91034e0cdcecb1c9531b3779cf08de3da0
|
[
"Apache-2.0"
] |
permissive
|
nithinksath96/MMdetection_TensorRT_FP16
|
d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d
|
c8379b209d4deeff9350baf5bbedfc95fb8941f4
|
refs/heads/master
| 2023-02-13T20:00:21.834541 | 2021-01-06T09:24:20 | 2021-01-06T09:24:20 | 327,260,988 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,792 |
py
|
# given rt_ series input, count the max number of concurrent jobs
# current implementation only applies to inf results, where processing starts immediately
import argparse, json, pickle
from os.path import join, isfile
from tqdm import tqdm
import numpy as np
from pycocotools.coco import COCO
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
def parse_args(argv=None):
    """Parse command-line options for the concurrency counter.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the old behavior.
            Added so the parser can be exercised programmatically.

    Returns:
        argparse.Namespace with annot_path, fps, result_dir, out_dir,
        type, and overwrite attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--annot-path', type=str, required=True)
    parser.add_argument('--fps', type=float, default=30)
    parser.add_argument('--result-dir', type=str, required=True)
    parser.add_argument('--out-dir', type=str, default=None)
    parser.add_argument('--type', type=str, default='det')
    parser.add_argument('--overwrite', action='store_true', default=False)
    opts = parser.parse_args(argv)
    return opts
def main():
    """Compute, per sequence, the maximum number of detection jobs that were
    in flight simultaneously, print the overall maximum, and pickle the
    per-sequence list."""
    opts = parse_args()

    # Write next to the results unless an explicit output dir is given.
    out_dir = mkdir2(opts.out_dir) if opts.out_dir else opts.result_dir

    db = COCO(opts.annot_path)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    n_concurrent = []
    for sid, seq in enumerate(tqdm(seqs)):
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        results = pickle.load(open(join(opts.result_dir, seq + '.pkl'), 'rb'))
        # use raw results when possible in case we change class subset during evaluation
        if opts.type == 'det':
            timestamps = results['timestamps']
            input_fidx = results['input_fidx']
        else:
            # Two-detector results: merge both detectors' events into one
            # stream before the sweep.
            det1_timestamps = results['det1_timestamps']
            det2_timestamps = results['det2_timestamps']
            det1_input_fidx = results['det1_input_fidx']
            det2_input_fidx = results['det2_input_fidx']
            timestamps = np.concatenate((det1_timestamps, det2_timestamps))
            input_fidx = np.concatenate((det1_input_fidx, det2_input_fidx))

        # Job i starts when its input frame arrives (frame index / fps) and
        # ends at its recorded output timestamp (inference starts
        # immediately, per the module comment above).
        t_start = np.asarray(input_fidx)/opts.fps
        t_end = np.asarray(timestamps)
        t_all = np.concatenate((t_start, t_end))
        order = np.argsort(t_all)
        n_output = len(t_start)
        n_current = 0
        max_current = 0
        # Sweep all events in time order; indices < n_output are starts,
        # the rest are ends of the corresponding jobs.
        for i in order:
            if i < n_output:
                # start
                n_current += 1
                max_current = max(max_current, n_current)
            else:
                # end
                n_current -= 1
        n_concurrent.append(max_current)

    print(f'Max number of concurrent jobs {max(n_concurrent)}')
    out_path = join(out_dir, 'n_concurrent.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump(n_concurrent, open(out_path, 'wb'))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
76ec0bfa00c7191eecde64d7a553a7998771bae9
|
4b1a3db3249a51bcd2b9699edcb60c2716b2889e
|
/discharge.py
|
e851bc189eee5ad877ce01278d2da3087be6cfb8
|
[] |
no_license
|
shohei/tank-simulation
|
799e7c8db543f639984dbadb1b41b83fc818b831
|
d74b8d09eb1d5e1fa1fa61327f4fff35754ac5eb
|
refs/heads/master
| 2021-01-02T09:14:46.709964 | 2015-07-29T13:48:52 | 2015-07-29T13:48:52 | 39,488,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 949 |
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#*** TANK PRESSURE SIMULATION ***
#*** AUTHOR: SHOHEI AOKI ********
#*** PROGRAMMED: 22 JUL 2015 ****
# Plots isentropic blow-down of a pressurized air tank through a small
# nozzle, for three tank volumes, over 100 seconds.
from pylab import *
##### PARAMETER AREA ############
gamma = 1.4  # ratio of specific heats for air
de = 2 #[mm] nozzle exit diameter
Ae = pi*(de*0.001/2)*(de*0.001/2)#[m2] nozzle exit area
T0i = (24 + 273.15) # Room temperature [K]
R = 289 # J/(kg dot K)  gas constant for air
V = 1/1000.0 #1L as [m3] (overwritten by the loop below)
sigma = sqrt(gamma*((2/(gamma+1))**((gamma+1)/(gamma-1)))) # critical flow efficient
##### SIMULATION FOR AIR DISCHARGE FROM TANK ##
t = arange(0.01,100,0.01) # 100 sec
pi = 0.1013*5*(10**6) # 5[MPa] initial pressure (note: shadows pylab's pi)
V_array = [1/1000.0,10/1000.0,20/1000.0]
for V in V_array:
    # Closed-form tank pressure for choked isentropic discharge.
    p0 = (1 + (((gamma - 1)/2)*(Ae*sigma*sqrt(R * T0i))/V)*t)**((-1)*2*gamma/(gamma-1))*pi
    plot(t,p0*(10**-6))
##### VISUALIZATION AREA #########
title('TANK PRESSURE TRANSITION BY AIR DISCHARGE')
legend(('1L','10L','20L'),'upper right')
xlabel('t [sec]')
ylabel('p0 [MPa]')
savefig('./image/tank-discharge.png')
show()
|
[
"[email protected]"
] | |
3d8932d7ae7a374fb4a9079e524d480316a6f5d4
|
817ff801938d25776b2564b3087c8a3c674da1a7
|
/NUP153_AnalyseComplex/WT_Minimization/chainBCP/WT_chainBCP_Minimization_6.py
|
93b4c101aa994d29251e3c4bc96938fd94e7bad9
|
[] |
no_license
|
yanghaobojordan/HIV1-Capsid
|
b22e21a9ad530ae11f128f409e298c5ab68871ee
|
f44f04dc9886e660c1fe870936c48e0e5bb5adc6
|
refs/heads/main
| 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,838 |
py
|
from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
    """Repack side chains, then minimize a pose with PyRosetta under Monte
    Carlo acceptance, logging scores/RMSD and dumping the final PDB."""
    filename=sys.argv[1]
    pose=pose_from_pdb(filename)
    test=Pose()
    test.assign(pose)
    scorefxn=get_fa_scorefxn()
    dumpfile = 'WT_chainBCP_Minimization_6.pdb'
    txtfile = 'WT_chainBCP_Minimization_6.txt'
    newfile = open(txtfile, "w")
    newfile.write(str(scorefxn(test)))
    newfile.write('\n')
    kT = 1
    mc = MonteCarlo(test, scorefxn, kT)
    # Gradient minimizer over all backbone and side-chain torsions.
    min_mover = MinMover()
    mm = MoveMap()
    mm.set_bb(True)
    mm.set_chi(True)
    min_mover.movemap(mm)
    min_mover.score_function(scorefxn)
    min_mover.min_type("dfpmin")
    min_mover.tolerance(0.001)
    # Rotamer repacking only (no design), keeping current rotamers available.
    task_pack=standard_packer_task(test)
    task_pack.restrict_to_repacking()
    task_pack.or_include_current(True)
    pack_mover=PackRotamersMover(scorefxn, task_pack)
    # Phase 1: 20 repacking rounds with Metropolis acceptance.
    for i in range(20):
        pack_mover.apply(test)
        mc.boltzmann(test)
        newfile.write(str(i))
        newfile.write(' ')
        newfile.write(str(scorefxn(test)))
        newfile.write(' ')
        newfile.write(str(CA_rmsd(pose, test)))
        newfile.write('\n')
    mc.recover_low(test)
    print ('Repacking Complete')
    print ('Lowest Score ', scorefxn(test))
    print (mc.show_scores())
    print (mc.show_counters())
    print (mc.show_state())
    newfile.write('Repacking Complete')
    newfile.write(' ')
    newfile.write(str(scorefxn(test)))
    newfile.write('\n')
    # Phase 2: 10000 minimization rounds with Metropolis acceptance.
    for i in range(10000):
        min_mover.apply(test)
        mc.boltzmann(test)
        newfile.write(str(i))
        newfile.write(' ')
        newfile.write(str(scorefxn(test)))
        newfile.write(' ')
        newfile.write(str(CA_rmsd(pose, test)))
        newfile.write('\n')
    mc.recover_low(test)
    print ('Minimization Complete')
    print ('Lowest Score ', scorefxn(test))
    print (mc.show_scores())
    print (mc.show_counters())
    print (mc.show_state())
    newfile.write('Minimization Complete')
    newfile.write(' ')
    newfile.write(str(scorefxn(test)))
    newfile.write('\n')
    newfile.write('RMSD')
    newfile.write(' ')
    newfile.write(str(CA_rmsd(pose, test)))
    newfile.write('\n')
    newfile.close()
    test.dump_pdb(dumpfile)
main()
|
[
"[email protected]"
] | |
9f4a4daa608a920aafce684a30429bf510d9d867
|
b381b5ce79ec03e281cba7e6ea253b286205fba1
|
/openstack/map_reduce/v1/job.py
|
2ab8baab2c33c86bccb1719887437cb769f0ca27
|
[
"Apache-2.0"
] |
permissive
|
sun363587351/python-openstacksdk
|
972eedc24199c3b8a15bd21accd29a6ec70febdb
|
f9e055300b1c79637d7b6a791168427f27322d73
|
refs/heads/master
| 2020-12-02T19:23:01.771376 | 2017-07-05T13:24:47 | 2017-07-05T13:24:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,901 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2 as resource
from openstack.auto_scaling import auto_scaling_service
from openstack.auto_scaling.v1 import get_next_marker
from openstack.map_reduce import map_reduce_service
class ExecutableJob(resource.Resource):
    """Executable Job
    The executable job indicates for a job and job-execution (the attribute
    set differs from the Job resource below, which mirrors the remote API).
    """
    #: Properties
    #: Job name
    job_name = resource.Body("job_name")
    #: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
    #: ``DistCp``, ``SparkScript``, ``SparkSql``
    job_type = resource.Body("job_type")
    #: Path of the .jar package or .sql file for job execution
    jar_path = resource.Body("jar_path")
    #: Key parameter for job execution
    arguments = resource.Body("arguments")
    #: Path for inputting data which must start with ``/`` or ``s3a://``
    input = resource.Body("input")
    #: Path for outputting data, which must start with / or s3a://
    output = resource.Body("output")
    #: Path for storing job logs that record job running status.
    #: This path must start with / or s3a://
    job_log = resource.Body("job_log")
    #: Whether to delete the cluster after the jobs are complete
    shutdown_cluster = resource.Body("shutdown_cluster")
    #: Data import and export
    file_action = resource.Body("file_action")
    #: whether to submit the job when the cluster is ready.
    submit_job_once_cluster_run = resource.Body(
        "submit_job_once_cluster_run", type=bool)
    #: HiveQL statement
    hql = resource.Body("hql")
    #: SQL program path
    hive_script_path = resource.Body("hive_script_path")
    #: Reserved attribute, is job protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, is job public
    is_public = resource.Body("is_public", type=bool)
class Job(resource.Resource):
    """Map Reduce job resource, mapped onto the ``/jobs`` REST endpoint."""
    resource_key = "job"
    resources_key = "jobs"
    base_path = "/jobs"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_create = True
    allow_update = True
    patch_update = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters(
        "sort_by"
    )

    #: Properties
    #: Job name
    name = resource.Body("name")
    #: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
    #: ``DistCp``, ``SparkScript``, ``SparkSql``
    type = resource.Body("type")
    #: A list of programs to be executed by the job
    mains = resource.Body("mains", type=list)
    #: A list of job-binaries required by the job
    libs = resource.Body("libs", type=list)
    #: Reserved attribute, user customer interfaces
    interface = resource.Body("interface", type=list)
    #: Job description
    description = resource.Body("description")
    #: Reserved attribute, is job protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, is job public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time of the job created time
    created_at = resource.Body("created_at")
    #: UTC date and time of the job last updated time
    updated_at = resource.Body("updated_at")
    #: The tenant this job belongs to
    tenant_id = resource.Body("tenant_id")
|
[
"[email protected]"
] | |
2d25dc2c818efe033ad59ee1eb11f2c8ccfde452
|
1ea966542e28e24b2f3f7d5e0352cbdc110a979a
|
/Algorithm/Programmers/Programmers_2개이하로다른비트.py
|
9a1ef5bccf76215078cf0ce46a5b4b707c54eb9b
|
[] |
no_license
|
yunhacho/SelfStudy
|
9ff7002362f6e9d8fe7d1ca3ccf94ee96726f635
|
99912af3df014a6864893c9274dbf83ff9ed05a8
|
refs/heads/main
| 2023-08-25T06:56:21.116419 | 2021-10-28T07:35:09 | 2021-10-28T07:35:09 | 360,470,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 242 |
py
|
def solution(numbers):
    """For each number, return the smallest larger number whose binary form
    differs from it in at most two bit positions."""
    result = []
    for number in numbers:
        # Prepend a '0' so there is always a zero bit to flip.
        bits = '0' + bin(number)[2:]
        # Rightmost '0' in the string.
        zero_pos = len(bits) - 1 - bits[::-1].index('0')
        if zero_pos == len(bits) - 1:
            # Number is even: flipping the last bit adds 1 (one bit differs).
            patched = bits[:zero_pos] + '1'
        else:
            # Swap the rightmost '01' pair to '10' (two bits differ).
            patched = bits[:zero_pos] + '10' + bits[zero_pos + 2:]
        result.append(int(patched, 2))
    return result
|
[
"[email protected]"
] | |
dbbb78de7586a3fd69f564f7384cb29ca7f56999
|
5793b470eea39ba99ff4d16325d462440647b77d
|
/System/Threads/thread-count.py
|
e0c36329cec628396bc4ee866d2c6f9daca41c6c
|
[] |
no_license
|
zhongjiezheng/python
|
01a99438fc4681817d4d0e623673afa1e488864c
|
5c5725ad0e75d07e016b64d79eddf3d88a524fa0
|
refs/heads/master
| 2021-01-17T11:57:12.373343 | 2015-07-08T09:04:48 | 2015-07-08T09:04:48 | 35,549,549 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
"""
thread basics: start 5 copies of a function running in parallel;
uses time.sleep so that the main thread doesn't die too earlly--
this kills all other threads on some platforms; stdout is shared:
thread output may be intermixd in this version arbitrarily.
"""
import _thread as thread, time
def counter(myId, count):
for i in range(count):
time.sleep(1)
print('[%s] => %s' % (myId, i))
for i in range(5):
thread.start_new_thread(counter,(i, 5))
time.sleep(6)
print('Main thread exiting.')
|
[
"[email protected]"
] | |
48b6b7cb9368a9db6760084e0982e05ee92758d6
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/mmcv/ops/masked_conv.py
|
919702e9cbd04b9e1f5c93147bcced8a1be38c61
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321 | 2023-08-28T09:02:10 | 2023-08-28T09:02:10 | 145,670,155 | 5,319 | 1,900 |
Apache-2.0
| 2023-09-14T02:37:16 | 2018-08-22T07:05:26 |
Python
|
UTF-8
|
Python
| false | false | 4,851 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from ..utils import ext_loader
ext_module = ext_loader.load_ext(
'_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
class MaskedConv2dFunction(Function):
    """Autograd Function that evaluates a conv2d only at spatial positions
    where ``mask`` is positive; every other output position stays zero."""

    @staticmethod
    def symbolic(g, features, mask, weight, bias, padding, stride=1):
        # ONNX export hook: map onto the custom MMCVMaskedConv2d operator.
        return g.op(
            'mmcv::MMCVMaskedConv2d',
            features,
            mask,
            weight,
            bias,
            padding_i=padding,
            stride_i=stride)

    @staticmethod
    def forward(ctx,
                features: torch.Tensor,
                mask: torch.Tensor,
                weight: torch.nn.Parameter,
                bias: torch.nn.Parameter,
                padding: int = 0,
                stride: int = 1) -> torch.Tensor:
        # Only batch size 1 and a single-channel mask matching the features'
        # spatial size are supported.
        assert mask.dim() == 3 and mask.size(0) == 1
        assert features.dim() == 4 and features.size(0) == 1
        assert features.size()[2:] == mask.size()[1:]
        pad_h, pad_w = _pair(padding)
        stride_h, stride_w = _pair(stride)
        if stride_h != 1 or stride_w != 1:
            raise ValueError(
                'Stride could not only be 1 in masked_conv2d currently.')
        out_channel, in_channel, kernel_h, kernel_w = weight.size()

        if features.device.type == 'npu':
            # Ascend NPU path: run a dense conv and zero out the positions
            # where the mask is not positive.
            import torch_npu
            output = torch_npu.npu_conv2d(
                features,
                weight,
                bias,
                stride=(stride_h, stride_w),
                padding=(pad_h, pad_w),
                dilation=(1, 1),
                groups=1)
            if mask.size()[1:] != output.size()[2:]:
                raise ValueError(
                    'The mask is inconsistent with the shape of output_conv.')
            mask = mask > 0
            mask = mask.type(output.dtype)
            output = output * mask
            return output

        batch_size = features.size(0)
        # Standard conv output size formula (stride is always 1 here).
        out_h = int(
            math.floor(
                torch.true_divide((features.size(2) + 2 * pad_h -
                                   (kernel_h - 1) - 1), stride_h) + 1))
        out_w = int(
            math.floor(
                torch.true_divide((features.size(3) + 2 * pad_w -
                                   (kernel_w - 1) - 1), stride_w) + 1))
        # (h, w) coordinates of the positions that actually need computing.
        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
        if mask_inds.numel() > 0:
            mask_h_idx = mask_inds[:, 0].contiguous()
            mask_w_idx = mask_inds[:, 1].contiguous()
            # im2col restricted to the masked positions, then one addmm
            # computes the conv outputs for exactly those columns.
            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
                                          mask_inds.size(0))
            ext_module.masked_im2col_forward(
                features,
                mask_h_idx,
                mask_w_idx,
                data_col,
                kernel_h=kernel_h,
                kernel_w=kernel_w,
                pad_h=pad_h,
                pad_w=pad_w)
            masked_output = torch.addmm(1, bias[:, None], 1,
                                        weight.view(out_channel, -1), data_col)
            # Scatter the per-position results back into the dense output.
            ext_module.masked_col2im_forward(
                masked_output,
                mask_h_idx,
                mask_w_idx,
                output,
                height=out_h,
                width=out_w,
                channels=out_channel)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output: torch.Tensor) -> tuple:
        # No backward implemented; all inputs receive None gradients.
        # NOTE(review): forward takes 6 inputs but 5 Nones are returned —
        # confirm against upstream.
        return (None, ) * 5
masked_conv2d = MaskedConv2dFunction.apply
class MaskedConv2d(nn.Conv2d):
    """Conv2d variant that can restrict computation to masked positions.

    With no mask it behaves exactly like the official ``nn.Conv2d``. The
    masked forward does not implement backward and currently only supports
    stride 1.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int, ...]],
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = True):
        super().__init__(in_channels, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias)

    def forward(self,
                input: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # Guard clause: without a mask this is a plain Conv2d.
        if mask is None:
            return super().forward(input)
        # Compute only at positions where the mask is positive.
        return masked_conv2d(input, mask, self.weight, self.bias,
                             self.padding)
|
[
"[email protected]"
] | |
745b47f4b9653e1adb5938a611487ad9e3201e35
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/944.py
|
bd687a861c6ef672174fcc914271cffea1314b06
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 |
MIT
| 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null |
UTF-8
|
Python
| false | false | 138 |
py
|
class Solution:
    def minDeletionSize(self, A):
        """Count columns of the string grid A that are not sorted
        top-to-bottom (LeetCode 944: Delete Columns to Make Sorted)."""
        deletions = 0
        for col in range(len(A[0])):
            # A column must be deleted if any adjacent pair is descending.
            for upper, lower in zip(A, A[1:]):
                if upper[col] > lower[col]:
                    deletions += 1
                    break
        return deletions
|
[
"[email protected]"
] | |
4da391e6845015007b01093614347747e5b52720
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R4/benchmark/startPyquil309.py
|
b9a32c54bbe98a26e0fe98284c0b30b521a3eef5
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,195 |
py
|
# qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
# NOTE(review): this connection is created at import time but never used
# below (the __main__ block uses get_qc instead) — possibly dead code.
conn = QVMConnection()
def make_circuit() -> Program:
    """Build the fixed 4-qubit demo circuit (12 operations total,
    counting the numbered gates below)."""
    # Gate sequence; the trailing comments preserve the original numbering.
    gate_sequence = [
        H(1),        # number=2
        H(2),        # number=3
        H(3),        # number=4
        Y(3),        # number=5
        SWAP(1, 0),  # number=6
        SWAP(1, 0),  # number=7
        Y(3),        # number=8
        Y(3),        # number=9
        SWAP(1, 0),  # number=10
        SWAP(1, 0),  # number=11
    ]
    prog = Program()
    for gate in gate_sequence:
        prog += gate
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how often each bitstring occurs.

    Parameters
    ----------
    bitstrings : iterable of hashable
        Measurement outcomes, e.g. strings such as '0101'.

    Returns
    -------
    dict
        Mapping from each distinct bitstring to its occurrence count.
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default collapses the original explicit
        # None-check branching into one idiomatic line.
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    # Build the circuit and run it 1024 times on a simulated 4-qubit QVM.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # run_and_measure returns one array per qubit; stack them into
    # per-shot rows, then render each shot as a bitstring like '0101'.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Persist the frequency summary; the path is relative to this script.
    writefile = open("../data/startPyquil309.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
9c2e5b9526c6eadce1fc38a03bb4c1f15495d7bc
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_create_security_group_option.py
|
0f2264525c7e8c7ce51ffcb365afd1fd1693468f
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 3,837 |
py
|
# coding: utf-8
import pprint
import re
import six
class NeutronCreateSecurityGroupOption:
    """Request body for creating a security group via the Neutron API.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Attributes whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'description': 'str',
        'name': 'str'
    }

    attribute_map = {
        'description': 'description',
        'name': 'name'
    }

    def __init__(self, description=None, name=None):
        """NeutronCreateSecurityGroupOption - a model defined in huaweicloud sdk"""
        self._description = None
        self._name = None
        self.discriminator = None
        # Only assign attributes that were actually provided.
        if description is not None:
            self.description = description
        if name is not None:
            self.name = name

    @property
    def description(self):
        """str: security-group description, 0-255 characters."""
        return self._description

    @description.setter
    def description(self, description):
        """Set the security-group description (0-255 characters)."""
        self._description = description

    @property
    def name(self):
        """str: security-group name, 0-255 characters; must not be "default"."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the security-group name (0-255 characters, not "default")."""
        self._name = name

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Return the pprint-formatted string of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they share type and all attributes."""
        return (isinstance(other, NeutronCreateSecurityGroupOption)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"[email protected]"
] | |
23857be0068cb1b58270601d7ea42d5393abbad8
|
2dfbb97b47fd467f29ffb26faf9a9f6f117abeee
|
/leetcode/84.py
|
a8c43f1a9b98fb79e9c45be4a3ddfa8e973b06fe
|
[] |
no_license
|
liuweilin17/algorithm
|
0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5
|
d3e8669f932fc2e22711e8b7590d3365d020e189
|
refs/heads/master
| 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 |
C++
|
UTF-8
|
Python
| false | false | 4,644 |
py
|
###########################################
# Let's Have Some Fun
# File Name: 84.py
# Author: Weilin Liu
# Mail: [email protected]
# Created Time: Fri Sep 27 11:00:23 2019
###########################################
#coding=utf-8
#!/usr/bin/python
# 84. Largest Rectangle in Histogram
class SegmentNode:
def __init__(self, begin, end):
self.min_v = -1
self.begin = begin
self.end = end
self.left = None
self.right = None
class Solution:
# O(n^3), time limit exceed
def largestRectangleArea1(self, heights: List[int]) -> int:
N = len(heights)
ret = 0
for i in range(N):
for j in range(i, N):
ret = max(ret, min(heights[i:j+1])*(j-i+1))
return ret
# O(n^2), time limit exceed
def largestRectangleArea2(self, heights: List[int]) -> int:
N = len(heights)
ret = 0
for i in range(N):
min_h = heights[i]
for j in range(i, N):
min_h = min(min_h, heights[j])
ret = max(ret, min_h*(j-i+1))
return ret
# divide and conquer
# the maximum area of rectangle is one of these:
# 1. minimum height * number of bars
# 2. maximum area of bars on the left of minimum height
# 3. maximum area of bars on the right of minimum height
# average O(nlogn)
# worst O(n^2) when heights are sorted
# time limit exceed
def largestRectangleArea3(self, heights: List[int]) -> int:
def helper(begin, end):
if begin > end: return 0
min_ind = begin
min_height = heights[min_ind]
for i in range(begin+1, end+1):
if heights[i] < min_height:
min_ind = i
min_height = heights[i]
a1 = min_height * (end - begin + 1)
a2 = helper(begin, min_ind-1)
a3 = helper(min_ind+1, end)
return max([a1, a2, a3])
N = len(heights)
return helper(0, N-1)
# divide and conquer with segment tree
def largestRectangleArea4(self, heights: List[int]) -> int:
# build segment tree for find mininum value in heights
def buildSegmentTree(begin, end):
if begin > end: return None
root = SegmentNode(begin, end)
if begin == end:
root.min_v = begin
return root
else:
middle = (begin + end) // 2
root.left = buildSegmentTree(begin, middle)
root.right = buildSegmentTree(middle+1, end)
root.min_v = root.left.min_v if heights[root.left.min_v] < heights[root.right.min_v] else root.right.min_v
return root
# find the mininum value in segment tree
def query(nd, begin, end):
if nd == None or begin > nd.end or end < nd.begin:
return -1
# I don't know why, check the review updates below this solution
if begin <= nd.begin and end >= nd.end:
return nd.min_v
left_min = query(nd.left, begin, end)
right_min = query(nd.right, begin, end)
if left_min == -1: return right_min
if right_min == -1: return left_min
return left_min if heights[left_min] < heights[right_min] else right_min
def helper(begin, end):
if begin > end: return 0
elif begin == end: return heights[begin]
else: pass
min_ind = query(root, begin, end)
print(begin, end, min_ind)
min_height = heights[min_ind]
a1 = min_height * (end - begin + 1)
a2 = helper(begin, min_ind-1)
a3 = helper(min_ind+1, end)
return max([a1, a2, a3])
N = len(heights)
root = buildSegmentTree(0, N-1)
return helper(0, N-1)
# stack
# st[-1] is the local maximum heights, we calcuate the area from its left to st[-1], all the heights on the left is in the stack and smaller than it
def largestRectangleArea5(self, heights: List[int]) -> int:
st = [-1] # use -1 to calculate the minimum width
N = len(heights)
max_area = 0
for i in range(N):
while st[-1] != -1 and heights[st[-1]] >= heights[i]:
max_area = max(max_area, heights[st.pop()] * (i - st[-1] - 1))
st.append(i)
while st[-1] != -1:
max_area = max(max_area, heights[st.pop()] * (len(heights) - st[-1] -1))
return max_area
|
[
"[email protected]"
] | |
658a507fcf159ac4b48d14cc5cca2cfada4e319d
|
3c2a197bf32e72444c5f36559ad0cb9b64035516
|
/codeskeleton/value_generators/random_int.py
|
d9aac5df824e8d25278a8ef6f114953e6b7a0a9f
|
[] |
no_license
|
appressoas/codeskeleton
|
c538bbfccf19735464dc42996b754cf9199a14a3
|
604011cb27c47b02d325379895bc23b543797216
|
refs/heads/master
| 2021-01-19T19:24:21.100020 | 2017-06-26T09:34:45 | 2017-06-26T09:34:45 | 88,417,371 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 228 |
py
|
import random
from .registry import GENERATOR_REGISTRY
def random_int(from_int=0, to_int=999999999):
    """Return a random integer N with from_int <= N <= to_int (both bounds
    inclusive, delegating to random.randint)."""
    return random.randint(from_int, to_int)
# Expose the generator under the name 'random_int' through the
# package-wide registry.
GENERATOR_REGISTRY.register(generator_name='random_int', function=random_int)
|
[
"[email protected]"
] | |
161c6671e458fed554bf825c179cc4b7abb336c1
|
96aab9f77de8170ae93004d699bd0b11e820b2d4
|
/rest/app/user/urls.py
|
c0b6dccc152b56208b129bc04563ca5b6a09e9fd
|
[] |
no_license
|
JasoSalgado/rest-app
|
8dbc842d6de0ec705fd04bc94e79ee75ad80f2e2
|
3d1662800bd1e98142a0edca244c82498cc4832b
|
refs/heads/master
| 2022-11-15T15:25:44.135084 | 2020-07-16T14:58:59 | 2020-07-16T14:58:59 | 280,182,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
from django.conf.urls import url
from rest.app.user.views import UserRegistrationView, UserLoginView
# Route table for the user app: 'signup' is served by UserRegistrationView
# and 'signin' by UserLoginView (both class-based views).
# NOTE(review): the patterns are not end-anchored (no trailing $), so any
# path beginning with 'signup'/'signin' matches — confirm this is intended.
urlpatterns = [
    url(r'^signup', UserRegistrationView.as_view()),
    url(r'^signin', UserLoginView.as_view()),
]
|
[
"[email protected]"
] | |
7a021ccaf3c670c98dfc5155d1cbd84b76bfd436
|
2caf6885511af24443e22aaa43cd679d694f6f80
|
/note/my_note/first_month/day06/do_it.py
|
ec11f1b1af222ae157ca35960d3fb73a0a203e08
|
[] |
no_license
|
nandadao/Python_note
|
7f9ba54a73af05c935b4f7e24cacb728859a6c69
|
abddfc2e9a1704c88867cff1898c9251f59d4fb5
|
refs/heads/master
| 2020-11-25T18:29:50.607670 | 2019-12-19T01:28:02 | 2019-12-19T01:28:02 | 228,793,207 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 751 |
py
|
# Practice notes: iterating nested dictionaries.
#
# dict_like = {
#     "qtx": ["coding", "reading", "running"],
#     "lzmly": ["movies", "coding", "food", "singing"]
# }
#
# # All hobbies of every person:
# # for person in dict_like:
# #     for hobby in dict_like[person]:
# #         print(person, hobby)

# City -> category -> list of entries ("景区" = scenic spots,
# "美食" = local food).
dict_city = {
    "北京": {
        "景区": ["天安门", "天坛", "故宫"],
        "美食": ["驴打滚", "豆汁"]
    },
    "四川": {
        "景区": ["九寨沟", "宽窄巷子"],
        "美食": ["火锅", "串串香"]
    },
}

# Scenic spots of Beijing.  FIX: the original label was "北京美食"
# (Beijing food) although the loop iterates the scenic-spot list.
for i in dict_city["北京"]["景区"]:
    print("北京景区", i)

# All city names.
for item in dict_city:
    print("城市有:", item)

# Scenic spots of every city.
for item in dict_city:
    for i in dict_city[item]["景区"]:
        print(item, i)
|
[
"[email protected]"
] | |
17ff71f9320ed1b5a19d7b730f0302b2113591eb
|
196f7e3238f961fb5eba7a794f0b0c75d7c30ba1
|
/Python自动化运维技术与最佳实践/2业务服务监控/213对比nginx配置文件test.py
|
9cc58a6e05df6dd2f918590e93150457966d7b24
|
[] |
no_license
|
Liaoyingjie/Pythonlearn
|
d0b1b95110017af7e063813660e52c61a6333575
|
8bca069f38a60719acac5aa39bd347f90ab0bfb1
|
refs/heads/master
| 2020-04-08T07:35:07.357487 | 2018-04-12T16:44:43 | 2018-04-12T16:44:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 815 |
py
|
#!/usr/bin/python
import difflib
import sys
# Paths of the two nginx configuration versions to compare; hard-coded
# rather than taken from sys.argv (see the usage check further below).
textfile1='nginx.conf.v1'
textfile2='nginx.conf.v2'
def readfile(filename):
    """Read *filename* and return its contents as a list of lines
    (line endings stripped).

    On an I/O error a message is printed and the process exits, matching
    the original fail-fast behaviour.
    """
    try:
        # BUG FIX: the original opened the file in 'a+' mode, which
        # positions the stream at end-of-file, so read() always returned ''
        # and the diff compared two empty documents.  'r' reads from the
        # start; the `with` block guarantees the handle is closed.
        with open(filename, 'r') as file_handle:
            return file_handle.read().splitlines()
    except IOError as error:
        print('Read file Error:' + str(error))
        sys.exit()
if textfile1=="" or textfile2=="":
print("Usage: simple3.py filename1 filename2")
sys.exit()
#textfile1='HtmlDiff.html'
#textfile2='HtmlDiff111.html'
text1_lines = readfile(textfile1)
text2_lines = readfile(textfile2)
d = difflib.HtmlDiff()
#print(d.make_file(text1_lines, text2_lines))
f=open('213对比Nginx网页结果.html', 'a+')
#print(d.make_file(text1_lines,text2_lines))
print((d.make_file(text1_lines,text2_lines)),file=f)
f.close()
|
[
"[email protected]"
] | |
df1b483b12a18f285047ae7d1f7b07f90b38e4ab
|
a43504f11666edffa9497630d9fcad31566b4349
|
/app/bot_engine/request_makers.py
|
7c475e770f2ee3cf8dc298e5fc7d6fa180ffd930
|
[] |
no_license
|
korid24/shop_list_webhook_bot
|
4896d0e731679815b043ba4c997651fe4f3682a9
|
efe359b42a8b625ea78b855664358937419a1785
|
refs/heads/master
| 2022-12-29T09:51:16.212730 | 2020-10-22T10:16:20 | 2020-10-22T10:16:20 | 306,046,971 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,933 |
py
|
import json
import requests
from typing import List, NamedTuple, Optional, Union
from config import AUTH_TOKEN, BASE_URL
# from local_utils import write_json
from bot_engine.utils import path_to
HEADERS = {
'Content-type': 'application/json',
'Authorization': 'token {}'.format(AUTH_TOKEN),
'Accept-Language': 'en-US'}
class RequestConstructor(NamedTuple):
    """
    Template describing one HTTP request: the target URL, the HTTP method
    name (as accepted by requests.request) and an optional
    JSON-serialisable body.
    """
    url: str
    method: str
    data: Optional[Union[list, dict]]
def create_user(
        telegram_id: int,
        first_name: Optional[str],
        last_name: Optional[str],
        nickname: Optional[str]) -> RequestConstructor:
    """
    Build the request that registers a new user in the backend (POST).
    """
    payload = {
        'telegram_id': telegram_id,
        'first_name': first_name,
        'last_name': last_name,
        'nickname': nickname}
    return RequestConstructor(
        url=BASE_URL + path_to('user'),
        data=payload,
        method='post')
def add_elements(
        telegram_id: int,
        position: int,
        elements: List[str]) -> RequestConstructor:
    """
    Build the request that bulk-creates elements (POST).

    When *position* is truthy the elements become purchases of that
    purchase list; otherwise new purchase lists themselves are created.
    """
    payload = [{'title': title} for title in elements]
    url = BASE_URL + path_to('user', telegram_id)
    if position:
        url += path_to('purchaselist', position) + path_to('purchase')
    else:
        url += path_to('purchaselist')
    url += 'bulk_create/'
    return RequestConstructor(url=url, data=payload, method='post')
def replace_element(
        telegram_id: int,
        position: int,
        old_ind: int,
        new_ind: int) -> RequestConstructor:
    """
    Build the request that moves an element to a new index (PATCH).
    With a truthy *position* the purchase inside that purchase list is
    moved; otherwise the purchase list itself is.
    """
    data = {'ind': new_ind}
    if position:
        url = (BASE_URL + path_to('user', telegram_id) +
               path_to('purchaselist', position) +
               path_to('purchase', old_ind))
    else:
        url = (BASE_URL + path_to('user', telegram_id) +
               path_to('purchaselist', old_ind))
    return RequestConstructor(url=url, data=data, method='patch')
def remove_elements(
        telegram_id: int,
        position: int,
        elements: List[int]) -> RequestConstructor:
    """
    Build the request that bulk-deletes elements (DELETE): purchases of
    the given purchase list when *position* is truthy, otherwise whole
    purchase lists.
    """
    data = {'items': elements}
    if position:
        url = (BASE_URL + path_to('user', telegram_id) +
               path_to('purchaselist', position) +
               path_to('purchase') + 'bulk_delete/')
    else:
        url = (BASE_URL + path_to('user', telegram_id) +
               path_to('purchaselist') + 'bulk_delete/')
    return RequestConstructor(url=url, data=data, method='delete')
def get_all(telegram_id: int) -> RequestConstructor:
    """
    Build the request that fetches the user's full record (GET).
    """
    return RequestConstructor(
        url=BASE_URL + path_to('user', telegram_id),
        data=None,
        method='get')
def make_request(
        info: RequestConstructor, answer: bool = True) -> Union[dict, int]:
    """
    Perform the HTTP request described by *info*.  Returns the decoded
    JSON body when *answer* is True, otherwise just the HTTP status code.
    """
    response = requests.request(
        method=info.method,
        url=info.url,
        data=json.dumps(info.data),
        headers=HEADERS)
    if not answer:
        return response.status_code
    return response.json()
|
[
"[email protected]"
] | |
2d9da259fdc14b8cb5bf137e5ab76ab8e8182a96
|
c019093a2474b92bda1b9fcab0ae750937aedc1c
|
/jaxlie/manifold/_manifold_helpers.py
|
9bfeaae7af619693187e8bbe93f513efab7291ad
|
[
"MIT"
] |
permissive
|
mfkiwl/jaxlie
|
6c8a83d367299592a68bb80c7dc9816e9e006f09
|
4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537
|
refs/heads/master
| 2023-07-13T17:11:16.693321 | 2021-08-31T18:51:33 | 2021-08-31T18:51:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,652 |
py
|
from typing import TypeVar, cast
import jax
from jax import numpy as jnp
from .. import hints
from .._base import MatrixLieGroup
from .._se2 import SE2
from .._se3 import SE3
from .._so2 import SO2
from .._so3 import SO3
T = TypeVar("T", bound=MatrixLieGroup)
@jax.jit
def rplus(transform: T, delta: hints.TangentVector) -> T:
    """Manifold right plus: computes `T_wb = T_wa @ exp(delta)`.

    Args:
        transform: `T_wa`
        delta: `T_ab.log()`

    Returns:
        T: `T_wb`
    """
    group = type(transform)
    return transform @ group.exp(delta)
@jax.jit
def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> hints.MatrixJax:
    """Analytical Jacobians for `jaxlie.manifold.rplus()`, linearized around a zero
    local delta.
    Useful for on-manifold optimization.
    Equivalent to --
    ```
    def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> jnp.ndarray:
        # Since transform objects are pytree containers, note that `jacfwd` returns a
        # transformation object itself and that the Jacobian terms corresponding to the
        # parameters are grabbed explicitly.
        return jax.jacfwd(
            jaxlie.manifold.rplus,  # Args are (transform, delta)
            argnums=1,  # Jacobian wrt delta
        )(transform, onp.zeros(transform.tangent_dim)).parameters()
    ```
    Args:
        transform
    Returns:
        Jacobian. Shape should be `(Group.parameters_dim, Group.tangent_dim)`.
    """
    # Each branch below hand-derives the closed form for one group type;
    # dispatch is on the exact type (no subclasses).
    if type(transform) is SO2:
        # Jacobian row indices: cos, sin
        # Jacobian col indices: theta
        transform_so2 = cast(SO2, transform)
        J = jnp.zeros((2, 1))
        cos, sin = transform_so2.unit_complex
        J = J.at[0].set(-sin).at[1].set(cos)
    elif type(transform) is SE2:
        # Jacobian row indices: cos, sin, x, y
        # Jacobian col indices: vx, vy, omega
        transform_se2 = cast(SE2, transform)
        J = jnp.zeros((4, 3))
        # Translation terms
        J = J.at[2:, :2].set(transform_se2.rotation().as_matrix())
        # Rotation terms
        J = J.at[:2, 2:3].set(
            rplus_jacobian_parameters_wrt_delta(transform_se2.rotation())
        )
    elif type(transform) is SO3:
        # Jacobian row indices: qw, qx, qy, qz
        # Jacobian col indices: omega x, omega y, omega z
        # Note: the /2.0 comes from the derivative of the quaternion
        # exponential at the identity.
        transform_so3 = cast(SO3, transform)
        w, x, y, z = transform_so3.wxyz
        _unused_neg_w, neg_x, neg_y, neg_z = -transform_so3.wxyz
        J = (
            jnp.array(
                [
                    [neg_x, neg_y, neg_z],
                    [w, neg_z, y],
                    [z, w, neg_x],
                    [neg_y, x, w],
                ]
            )
            / 2.0
        )
    elif type(transform) is SE3:
        # Jacobian row indices: qw, qx, qy, qz, x, y, z
        # Jacobian col indices: vx, vy, vz, omega x, omega y, omega z
        transform_se3 = cast(SE3, transform)
        J = jnp.zeros((7, 6))
        # Translation terms
        J = J.at[4:, :3].set(transform_se3.rotation().as_matrix())
        # Rotation terms (reuses the SO3 branch above)
        J = J.at[:4, 3:6].set(
            rplus_jacobian_parameters_wrt_delta(transform_se3.rotation())
        )
    else:
        assert False, f"Unsupported type: {type(transform)}"
    assert J.shape == (transform.parameters_dim, transform.tangent_dim)
    return J
@jax.jit
def rminus(a: T, b: T) -> hints.TangentVectorJax:
    """Manifold right minus: computes `delta = (T_wa.inverse() @ T_wb).log()`.

    Args:
        a: `T_wa`
        b: `T_wb`

    Returns:
        `T_ab.log()`
    """
    relative = a.inverse() @ b
    return relative.log()
|
[
"[email protected]"
] | |
0628d28942b07798a3581b0c726246718d0103bf
|
6ed233ec80984cd8d6eb5b8f2efde1ac5feadc4b
|
/ebc/nbr2018/tests/base.py
|
d8f1b8dc478a579559e54fd5ce377970766ca953
|
[
"Unlicense"
] |
permissive
|
lflrocha/ebc.nbr2018
|
ce03abd238dca532d8adedaae0778b519b334852
|
0259390fecda065bf040b08e5ae3050ba96b1c4e
|
refs/heads/master
| 2020-04-08T12:44:17.247750 | 2019-08-08T18:13:57 | 2019-08-08T18:13:57 | 159,359,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,962 |
py
|
"""Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
    """Set up the package and its dependencies.
    The @onsetup decorator causes the execution of this body to be
    deferred until the setup of the Plone site testing layer. We could
    have created our own layer, but this is the easiest way for Plone
    integration tests.
    """
    # Load the ZCML configuration for the example.tests package.
    # This can of course use <include /> to include other packages.
    # Five's debug mode is toggled on only while the ZCML is parsed.
    fiveconfigure.debug_mode = True
    import ebc.nbr2018
    zcml.load_config('configure.zcml', ebc.nbr2018)
    fiveconfigure.debug_mode = False
    # We need to tell the testing framework that these products
    # should be available. This can't happen until after we have loaded
    # the ZCML. Thus, we do it here. Note the use of installPackage()
    # instead of installProduct().
    # This is *only* necessary for packages outside the Products.*
    # namespace which are also declared as Zope 2 products, using
    # <five:registerPackage /> in ZCML.
    # We may also need to load dependencies, e.g.:
    # ztc.installPackage('borg.localrole')
    ztc.installPackage('ebc.nbr2018')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
# (Both calls run at import time of this module.)
setup_product()
ptc.setupPloneSite(products=['ebc.nbr2018'])
class TestCase(ptc.PloneTestCase):
    """Base class for all unit-test cases in this package.
    Common utility or setup code shared by unit tests belongs here.
    """
class FunctionalTestCase(ptc.FunctionalTestCase):
    """Base class for functional integration tests that use doctest
    syntax.  Common utility or setup code for functional tests belongs
    here.
    """
    def afterSetUp(self):
        # Register a test member named 'contributor' that also carries the
        # Contributor role, so functional tests can act as a non-manager
        # user that is allowed to add content.
        roles = ('Member', 'Contributor')
        self.portal.portal_membership.addMember('contributor',
                                                'secret',
                                                roles, [])
|
[
"[email protected]"
] | |
0bccda679a470479ed2e699aaf932957507e734f
|
5c0c0176db0ccf2c24b6b5ed459a8dc144518b13
|
/nni/nas/benchmarks/nasbench101/graph_util.py
|
10805685fec3ff7359ec39dc0ae1c019e67950ae
|
[
"MIT"
] |
permissive
|
petuum/nni
|
ac4f4a1c4d6df71684eeffa127b7c4858fd29e97
|
8134be6269902939232482d63649c06f9864be6d
|
refs/heads/master
| 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 |
MIT
| 2020-11-20T20:21:15 | 2020-10-09T19:34:11 |
Python
|
UTF-8
|
Python
| false | false | 3,790 |
py
|
import hashlib
import numpy as np
from .constants import INPUT, LABEL2ID, OUTPUT
def _labeling_from_architecture(architecture, vertices):
    # Rebuild the per-vertex label list: fixed INPUT first, the chosen ops
    # for vertices 1..vertices-2, and fixed OUTPUT last.
    return [INPUT] + [architecture['op{}'.format(i)] for i in range(1, vertices - 1)] + [OUTPUT]
def _adjancency_matrix_from_architecture(architecture, vertices):
matrix = np.zeros((vertices, vertices), dtype=np.bool)
for i in range(1, vertices):
for k in architecture['input{}'.format(i)]:
matrix[k, i] = 1
return matrix
def nasbench_format_to_architecture_repr(adjacency_matrix, labeling):
    """
    Convert NAS-Bench-101's native (adjacency matrix, labeling) encoding of
    a cell into NNI's architecture-dict representation.  (The original
    docstring summary, copied from the hashing helper, was incorrect.)
    Parameters
    ----------
    adjacency_matrix : np.ndarray
        A 2D array of shape NxN, where N is the number of vertices.
        ``matrix[u][v]`` is 1 if there is a direct edge from `u` to `v`,
        otherwise it will be 0.
    labeling : list of str
        A list of str that starts with input and ends with output. The intermediate
        nodes are chosen from candidate operators.
    Returns
    -------
    tuple of int and dict
        Converted number of vertices and architecture.
    """
    num_vertices = adjacency_matrix.shape[0]
    assert len(labeling) == num_vertices
    architecture = {}
    # Operators for the intermediate vertices (input/output are implicit).
    for i in range(1, num_vertices - 1):
        architecture['op{}'.format(i)] = labeling[i]
        assert labeling[i] not in [INPUT, OUTPUT]
    # Predecessor lists derived from the adjacency matrix columns.
    for i in range(1, num_vertices):
        architecture['input{}'.format(i)] = [k for k in range(i) if adjacency_matrix[k, i]]
    return num_vertices, architecture
def infer_num_vertices(architecture):
    """
    Infer number of vertices from an architecture dict.

    Parameters
    ----------
    architecture : dict
        Architecture in NNI format.

    Returns
    -------
    int
        Number of vertices.
    """
    op_keys = {key for key in architecture if key.startswith('op')}
    num_ops = len(op_keys)
    # Op keys must be exactly op1..opN with no gaps.
    assert op_keys == {'op{}'.format(i) for i in range(1, num_ops + 1)}
    # Intermediate ops plus the fixed input and output vertices.
    return num_ops + 2
def hash_module(architecture, vertices):
    """
    Computes a graph-invariance MD5 hash of the matrix and label pair.
    This snippet is modified from code in NAS-Bench-101 repo.
    Parameters
    ----------
    architecture : dict
        Architecture in NNI format; converted internally back to a
        labeling and an adjacency matrix.  (The original docstring
        documented the pre-conversion `matrix`/`labeling` parameters.)
    vertices : int
        Number of vertices in the cell, including input and output.
    Returns
    -------
    str
        MD5 hash of the matrix and labeling.
    """
    labeling = _labeling_from_architecture(architecture, vertices)
    labeling = [LABEL2ID[t] for t in labeling]
    matrix = _adjancency_matrix_from_architecture(architecture, vertices)
    # Initial per-vertex fingerprint: (out-degree, in-degree, label id).
    in_edges = np.sum(matrix, axis=0).tolist()
    out_edges = np.sum(matrix, axis=1).tolist()
    assert len(in_edges) == len(out_edges) == len(labeling)
    hashes = list(zip(out_edges, in_edges, labeling))
    hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
    # Computing this up to the diameter is probably sufficient but since the
    # operation is fast, it is okay to repeat more times.
    for _ in range(vertices):
        new_hashes = []
        for v in range(vertices):
            # Mix each vertex's hash with the sorted hashes of its in- and
            # out-neighbours so the result is order-invariant
            # (Weisfeiler-Lehman-style relabeling).
            in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
            out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
            new_hashes.append(hashlib.md5(
                (''.join(sorted(in_neighbors)) + '|' +
                 ''.join(sorted(out_neighbors)) + '|' +
                 hashes[v]).encode('utf-8')).hexdigest())
        hashes = new_hashes
    fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
    return fingerprint
|
[
"[email protected]"
] | |
2ec5a1156d06c902673f739affb49f1533f4092d
|
24bc4990e9d0bef6a42a6f86dc783785b10dbd42
|
/build/fuchsia/PRESUBMIT.py
|
f8c7df28fc5fd1397f1569b6b65e371324b3fa65
|
[
"BSD-3-Clause"
] |
permissive
|
nwjs/chromium.src
|
7736ce86a9a0b810449a3b80a4af15de9ef9115d
|
454f26d09b2f6204c096b47f778705eab1e3ba46
|
refs/heads/nw75
| 2023-08-31T08:01:39.796085 | 2023-04-19T17:25:53 | 2023-04-19T17:25:53 | 50,512,158 | 161 | 201 |
BSD-3-Clause
| 2023-05-08T03:19:09 | 2016-01-27T14:17:03 | null |
UTF-8
|
Python
| false | false | 1,591 |
py
|
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Fuchsia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
import os
def CommonChecks(input_api, output_api):
    """Run the build/fuchsia unit tests under Python 3 and return the
    accumulated presubmit results."""
    build_fuchsia_dir = input_api.PresubmitLocalPath()
    def J(*dirs):
        """Returns a path relative to presubmit directory."""
        return input_api.os_path.join(build_fuchsia_dir, *dirs)
    tests = []
    # Test scripts that run on every platform.
    unit_tests = [
        J('binary_sizes_test.py'),
        J('binary_size_differ_test.py'),
        J('device_target_test.py'),
        J('gcs_download_test.py'),
        J('update_images_test.py'),
        J('update_product_bundles_test.py'),
        J('update_sdk_test.py'),
    ]
    # TODO(1309977): enable on Windows when fixed.
    if os.name != 'nt':
        unit_tests.extend([J('fvdl_target_test.py')])
    tests.extend(
        input_api.canned_checks.GetUnitTests(input_api,
                                             output_api,
                                             unit_tests=unit_tests,
                                             run_on_python2=False,
                                             run_on_python3=True,
                                             skip_shebang_check=True))
    return input_api.RunTests(tests)
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point for upload; delegates to CommonChecks."""
    return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point for commit; delegates to CommonChecks."""
    return CommonChecks(input_api, output_api)
|
[
"[email protected]"
] | |
a7bbc60feece73e88f0a57f6209db2d14d87241c
|
bc441bb06b8948288f110af63feda4e798f30225
|
/user_service_sdk/model/cmdb_extend/subsystem_dependency_pb2.pyi
|
56c626d516302fae9e256521a00d0df10a2ecd97
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,344 |
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
from user_service_sdk.model.cmdb_extend.app_dependency_pb2 import (
AppDependency as user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# Typed stub for the SubsystemDependency protobuf message (auto-generated;
# see the generator note in this file's header).  Do not edit by hand.
class SubsystemDependency(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    # Nested message: a lightweight reference to a connected subsystem,
    # identified by its CMDB object/instance ids.
    class ConnectSubsystems(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        abbreviation = ... # type: typing___Text
        object_id = ... # type: typing___Text
        instance_id = ... # type: typing___Text
        name = ... # type: typing___Text
        def __init__(self,
            *,
            abbreviation : typing___Optional[typing___Text] = None,
            object_id : typing___Optional[typing___Text] = None,
            instance_id : typing___Optional[typing___Text] = None,
            name : typing___Optional[typing___Text] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> SubsystemDependency.ConnectSubsystems: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency.ConnectSubsystems: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
    abbreviation = ... # type: typing___Text
    name = ... # type: typing___Text
    object_id = ... # type: typing___Text
    instance_id = ... # type: typing___Text
    @property
    def components(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]: ...
    @property
    def connect_subsystems(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[SubsystemDependency.ConnectSubsystems]: ...
    def __init__(self,
        *,
        abbreviation : typing___Optional[typing___Text] = None,
        name : typing___Optional[typing___Text] = None,
        object_id : typing___Optional[typing___Text] = None,
        instance_id : typing___Optional[typing___Text] = None,
        components : typing___Optional[typing___Iterable[user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]] = None,
        connect_subsystems : typing___Optional[typing___Iterable[SubsystemDependency.ConnectSubsystems]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> SubsystemDependency: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"components",b"components",u"connect_subsystems",b"connect_subsystems",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
|
[
"[email protected]"
] | |
c534d7c22c8c9cfe1da125036b9b6e7f079298dc
|
6cfa6d84722cf560b9dc144ba826d857e884d8fb
|
/redis/ticker/config/role.sample.py
|
793f9031371e1b17ce60702bbe524190ca842034
|
[] |
no_license
|
chaeplin/dash-ticker
|
b5e3702c87bc351ae40863de8cd8a55dddc74330
|
99e1fdc4e105601bdcfa55e80c524ca48294bee8
|
refs/heads/master
| 2021-01-18T23:07:24.246729 | 2017-11-23T09:39:40 | 2017-11-23T09:39:40 | 72,606,879 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
HOST_ROLE = 'MASTER'
#HOST_ROLE = 'SLAVE'
# SENTINEL CHECK
# MASTER
MASTER_SETINEL_HOST = '192.168.10.3'
MASTER_REDIS_MASTER = '192.168.10.2'
#SLAVE
SLAVE_SETINEL_HOST = '192.168.10.4'
SLAVE_REDIS_MASTER = '192.168.10.1'
|
[
"[email protected]"
] | |
a2aad80e1bd8dcac5b76b43c7c1b79f9d346ecb5
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/data_persistence_and_exchange/sqlite3/sqlite3_iterdump.py
|
b3e3cb6c5fd4e677c2df637f142fcfd822cb06dd
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847 | 2018-05-04T12:54:50 | 2018-05-04T12:54:50 | 125,447,397 | 0 | 1 | null | 2022-10-02T17:21:50 | 2018-03-16T01:32:50 |
Python
|
UTF-8
|
Python
| false | false | 866 |
py
|
import sqlite3
schema_filename = 'todo_schema.sql'
with sqlite3.connect(':memory:') as conn:
conn.row_factory = sqlite3.Row
print('Creating schema')
with open(schema_filename, 'rt') as f:
schema = f.read()
conn.executescript(schema)
print('Inserting initial data')
conn.execute("""
insert INTO project (name, description, deadline)
VALUES ('pymotw', 'Python Module fo the Week', '2018-12-01')
""")
data = [
('write about select', 'done', '2010-10-03', 'pymotw'),
('write about random', 'waiting', '2010-11-10', 'pymotw'),
('write about sqlite3', 'active', '2010-10-17', 'pymotw'),
]
conn.executemany("""
insert INTO task (details, status, deadline, project)
VALUES (?, ?, ?, ?)
""", data)
print('Dumping:')
for text in conn.iterdump():
print(text)
|
[
"[email protected]"
] | |
bd32ba3fd62a9486d5b5dbaac375ebf63d3f6370
|
b4871c8dd8ef257d604ac221ecff0c71e14f06cb
|
/pilot_curriculum/django_apps/generic_nasa/nasa/migrations/0001_initial.py
|
99716aced67f5162e56b39feda4fbd930ab74f6c
|
[] |
no_license
|
manzur1990/tracecamp_curriculum
|
b55605b0bbe4b5e3b333ae3fb105141e53f42e39
|
e9c8ee9a3c151a5cd57137f6575d1342a7de83fb
|
refs/heads/master
| 2022-04-13T15:24:54.305552 | 2019-08-12T13:58:40 | 2019-08-12T13:58:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
# Generated by Django 2.1.4 on 2018-12-21 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NasaComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('comment', models.TextField()),
('rating', models.IntegerField()),
('image_url', models.URLField()),
],
),
]
|
[
"[email protected]"
] | |
496afa8406a6ad5f9584ceddba65ba6374ac3cfb
|
cc44edfa1edbedea3ad044805be7548e0ccba70d
|
/0x0C-python-almost_a_circle/models/square.py
|
dc5e2428176dcbb01162c0529f50870f361569e2
|
[] |
no_license
|
set808/holbertonschool-higher_level_programming
|
421f0da1f91cd56eb2daa4e07a51b4a505d53edc
|
eb276a4e68e5cc43498459eec78fc05f72e2cd48
|
refs/heads/master
| 2020-03-09T13:07:43.824914 | 2018-09-08T00:26:46 | 2018-09-08T00:26:46 | 128,802,718 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,833 |
py
|
#!/usr/bin/python3
'''
Defines the class Square
'''
from models.rectangle import Rectangle
class Square(Rectangle):
'''Square class that inherits from Rectangle
'''
def __init__(self, size, x=0, y=0, id=None):
'''Initializes the Square object
Args:
size (int): size of the square
x (int): position on the x axis
y (int): position on the y axis
id (int): the id of the object
'''
super().__init__(size, size, x, y, id)
def __str__(self):
'''Returns a string representation of a Square object
'''
return ('[Square] ({:d}) {:d}/{:d} - {:d}'.
format(self.id, self.x, self.y, self.width))
@property
def size(self):
'''Returns the size of the Square object
Return:
returns the size
'''
return self.width
@size.setter
def size(self, value):
'''Sets the size of the Square object
Args:
value (int): the new size value
'''
self.width = value
self.height = value
def update(self, *args, **kwargs):
'''Updates the Square instance
Args:
list of new values to update Square values
'''
if args:
keys = ['id', 'size', 'x', 'y']
for key, value in zip(keys, args):
setattr(self, key, value)
return
else:
for key, value in kwargs.items():
if key in kwargs.keys():
setattr(self, key, value)
def to_dictionary(self):
'''Returns a dictionary representation of a Square
Return:
returns the dictionary representation
'''
return {'id': self.id, 'size': self.size, 'x': self.x, 'y': self.y}
|
[
"[email protected]"
] | |
698e29f11047d2ec058edc49d83078842a204ea8
|
f98ca6e020f21b303f8cc2a8474f71ce436f2d75
|
/tests/test_jsonlib.py
|
a67045757d2f9a40191ad7b77c8194728c2eb43e
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
garnerargoed/clldutils
|
df8afd714ab0ae1004aeb47dc24e1e96bb33323b
|
0d85c0bf46184bb99c6800ecbfa7f5db87cb2e7e
|
refs/heads/master
| 2021-09-07T22:36:17.564100 | 2018-03-02T09:54:44 | 2018-03-02T09:54:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 855 |
py
|
# coding: utf8
from __future__ import unicode_literals
from datetime import date
import pytest
from clldutils.jsonlib import dump, load, parse, update, format
def test_parse_json_with_datetime():
assert parse(dict(d='2012-12-12T20:12:12.12'))['d'].year
def test_update(tmppath):
p = tmppath / 'test'
with pytest.raises(ValueError):
with update(p):
pass # pragma: no cover
with update(p, default={}) as obj:
obj['a'] = 1
with update(p) as obj:
assert obj['a'] == 1
obj['a'] = 2
with update(p) as obj:
assert obj['a'] == 2
def test_json(tmppath):
d = {'a': 234, 'ä': 'öäüß'}
p = tmppath / 'test'
dump(d, p, indent=4)
for k, v in load(p).items():
assert d[k] == v
def test_format_json():
format(date.today())
assert format(5) == 5
|
[
"[email protected]"
] | |
f8eccfe1aedbc9b1aa4f0a96d6d6e702357e2324
|
9c69e2fc689194237ef294071a9c14c6dfabe545
|
/src/ultimate/user/urls.py
|
f8a18af2ec2ba1e3f063efd7648d2a5fa4b8f419
|
[] |
permissive
|
a2ultimate/ultimate-league-app
|
69f0331f5efeb2883c00990eb7a59ac346a34c69
|
1b40e8a01950fc526db9b649b78ada71ec567624
|
refs/heads/main
| 2023-04-03T06:43:24.471566 | 2023-03-16T00:55:22 | 2023-03-16T00:55:22 | 8,152,035 | 4 | 2 |
BSD-3-Clause
| 2023-03-16T00:55:24 | 2013-02-12T03:15:25 |
Python
|
UTF-8
|
Python
| false | false | 1,599 |
py
|
from django.conf.urls import url, include
from django.contrib.auth.views import login, logout, password_reset, password_reset_done, password_reset_confirm, password_reset_complete
from . import views
urlpatterns = [
url(r'^$', views.index, {}, 'user'),
url(r'^log-in/$', login, {'template_name': 'user/login.html'}, 'auth_log_in'),
url(r'^log-out/$', logout, {'template_name': 'user/logout.html'}, 'auth_log_out'),
url(r'^password/reset/$', password_reset, {'post_reset_redirect': '/user/password/reset/done/', 'template_name': 'user/registration/password_reset_form.html',
'email_template_name': 'user/registration/password_reset_email.html', 'subject_template_name': 'user/registration/password_reset_subject.txt', }, 'password_reset'),
url(r'^password/reset/done/$', password_reset_done,
{'template_name': 'user/registration/password_reset_done.html'}, 'password_reset_done'),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', password_reset_confirm, {
'post_reset_redirect': '/user/password/done/', 'template_name': 'user/registration/password_reset_confirm.html'}, 'password_reset_confirm'),
url(r'^password/done/$', password_reset_complete,
{'template_name': 'user/registration/password_reset_complete.html'}, 'password_reset_confirm'),
url(r'^sign-up/$', views.signup, {}, 'registration_register'),
url(r'^edit/profile/$', views.editprofile, {}, 'editprofile'),
url(r'^edit/ratings/$', views.editratings, {}, 'editratings'),
]
|
[
"[email protected]"
] | |
4ee1c835bfb47e715ce2c96c65cf218f187bab31
|
282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19
|
/Malware1/venv/Lib/site-packages/numpy/lib/npyio.py
|
6eb9cbd18235dbb0bd65ee520e9f94852156d02a
|
[] |
no_license
|
sameerakhtar/CyberSecurity
|
9cfe58df98495eac6e4e2708e34e70b7e4c055d3
|
594973df27b4e1a43f8faba0140ce7d6c6618f93
|
refs/heads/master
| 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:40ba76b8ee10e92857396797dd69934bd5b9c1c413138a9ccef88a8b534917a6
size 84853
|
[
"[email protected]"
] | |
8710c34e56994907a5c2a8375d3db551668509d1
|
5e5b3776acc441b77a970db29d99e850b79be65e
|
/gist/define.py
|
3f301f552c4ea79b674caf06a826dd54b686c3f2
|
[
"Apache-2.0"
] |
permissive
|
eightnoteight/x-gist
|
f153ae7c5ae5d9335af23ba54c0c668b71a5c157
|
ec65e5193f989238a026a8b239eabf61c5ec7a8d
|
refs/heads/master
| 2020-12-24T16:23:40.157360 | 2014-08-15T04:54:14 | 2014-08-15T04:54:14 | 22,781,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 244 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class struct(dict):
def __init__(self, **kwargs):
super(struct, self).__init__(**kwargs)
self.__dict__ = self
client = struct(name="x-gist",
url ="https://github.com/eightnoteight/x-gist")
|
[
"[email protected]"
] | |
ac2dea16ccd2b71756d09ad35c1724375eada021
|
5c2f520dde0cf8077facc0fcd9a92bc1a96d168b
|
/microbenchmarks/exceptions_ubench.py
|
8260f1764de23db08de7146823a2733eba4417dc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
nagyist/pyston
|
b613337a030ef21a3f03708febebe76cedf34c61
|
14ba2e6e6fb5c7316f66ccca86e6c6a836d96cab
|
refs/heads/master
| 2022-12-24T03:56:12.885732 | 2015-02-25T11:11:08 | 2015-02-25T11:28:13 | 31,314,596 | 0 | 0 |
NOASSERTION
| 2022-12-17T08:15:11 | 2015-02-25T13:24:41 |
Python
|
UTF-8
|
Python
| false | false | 241 |
py
|
def f():
# Try to eliminate as much non-exception stuff as possible:
from __builtin__ import Exception
e = Exception()
for i in xrange(100000):
try:
raise e
except Exception:
pass
f()
|
[
"[email protected]"
] | |
67f7daaefaae8d776a203ce1eb65de7d4fc4810a
|
cb57a9ea4622b94207d12ea90eab9dd5b13e9e29
|
/lc/python/0339_nested_list_weight_sum.py
|
4db7961460d959975511fefeea17b42164eec24f
|
[] |
no_license
|
boknowswiki/mytraning
|
b59585e1e255a7a47c2b28bf2e591aef4af2f09a
|
5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a
|
refs/heads/master
| 2023-08-16T03:28:51.881848 | 2023-08-10T04:28:54 | 2023-08-10T04:28:54 | 124,834,433 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,821 |
py
|
# dfs
# time O(n)
# space O(1)
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution:
def depthSum(self, nestedList: List[NestedInteger]) -> int:
if not nestedList:
return 0
ret = 0
def dfs(nl, level):
nonlocal ret
for nnl in nl:
if nnl.isInteger():
ret += nnl.getInteger()*level
else:
dfs(nnl.getList(), level+1)
return
dfs(nestedList, 1)
return ret
|
[
"[email protected]"
] | |
3215f4395ddfe3f66ca86b29a70209aa7b2a2b1b
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/databox/azure-mgmt-databox/azure/mgmt/databox/__init__.py
|
01935d3cb4901323838878ded3e2783723747739
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 |
MIT
| 2023-09-14T21:48:49 | 2012-04-24T16:46:12 |
Python
|
UTF-8
|
Python
| false | false | 726 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._data_box_management_client import DataBoxManagementClient
__all__ = ['DataBoxManagementClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
from ._version import VERSION
__version__ = VERSION
|
[
"[email protected]"
] | |
91d66a042dd155e0b4c535b6fd0d0b0c5f21e6bc
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/Py括号的配对问题.py
|
07341985e258c672ef6b2f14eda73e2fd3628ce7
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
lst = raw_input().strip()
stack = []
flag = True
for i in lst:
if i == '(':
stack.append('(')
elif i == '[':
stack.append('[')
elif i == ')':
if len(stack) > 0 and stack[-1] == '(':
stack.pop(-1)
else:
flag = False
break
elif i == ']':
if len(stack) > 0 and stack[-1] == '[':
stack.pop(-1)
else:
flag = False
break
if flag:
print('true')
else:
print('false')
|
[
"[email protected]"
] | |
6518580d8c0f42beea9b03ecc5cf5026c5eb4b0b
|
a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c
|
/venv/Lib/site-packages/pandas/tests/io/pytables/__init__.py
|
04573ec7273cbbee29a9587f2fd75e67ef512d86
|
[] |
no_license
|
mylonabusiness28/Final-Year-Project-
|
e4b79ccce6c19a371cac63c7a4ff431d6e26e38f
|
68455795be7902b4032ee1f145258232212cc639
|
refs/heads/main
| 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:7899383beb479b67e688296a1f549aed94a571607226a1afea25dce1a3dc152c
size 411
|
[
"[email protected]"
] | |
a0501f5fe8d7b2dbd0b347bf845646f7cd23628d
|
34599a66861f7d95a5740eeb5329ea77014e18d4
|
/problems_solving/algospot/firetrucks.py
|
0006d6df8ed42a042757bbebd9b4c40fa0a3cf8c
|
[] |
no_license
|
laolee010126/algorithm-with-python
|
f0f5f1bc3cbe374ccbb59e10ac639674c44ae743
|
89ff0c47a6d8b0cd5b31a25bb3981b8e90971f19
|
refs/heads/master
| 2022-04-01T17:38:36.199309 | 2020-01-14T01:54:22 | 2020-01-14T01:54:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,108 |
py
|
"""Get least sum of distances to dispatch firetrucks to houses on fire
:input:
1
8 12 3 2
1 2 3
1 6 9
2 3 6
3 4 4
3 5 2
4 5 7
6 5 5
8 6 5
6 7 3
8 7 3
7 5 1
2 8 3
2 3 5
4 6
:return:
16
"""
from heapq import heappush, heappop
from math import inf
from sys import stdin
get_input = stdin.readline
def min_dist(g, dest, src):
V = len(g)
# Add a new trasparent vertex connecting fire stations into one sinlge station
for s in src:
g[0].append((0, s))
g[s].append((0, 0))
# 1. priority queue version
# pq = [(0, 0)]
# dist = [inf] * V
# dist[0] = 0
# while pq:
# cost, here = heappop(pq)
# if cost > dist[here]:
# continue
# for dc, there in g[here]:
# nxt_dist = cost + dc
# if nxt_dist < dist[there]:
# dist[there] = nxt_dist
# heappush(pq, (nxt_dist, there))
# return sum(dist[d] for d in dest)
# 2. Non-priority queue version
dist = [inf] * V
dist[0] = 0
visited = [False] * V
while True:
min_dist = inf
here = None
for v in range(V):
if dist[v] < min_dist and not visited[v]:
min_dist = dist[v]
here = v
if min_dist == inf:
break
visited[here] = True
for dc, there in g[here]:
nxt_dist = dist[here] + dc
if not visited[there] and nxt_dist < dist[there]:
dist[there] = nxt_dist
return sum(dist[d] for d in dest)
if __name__ == '__main__':
C = int(get_input().strip())
ans = []
for _ in range(C):
V, E, DEST, SRC = (int(n) for n in get_input().strip().split())
g = [[] for _ in range(V+1)]
for _ in range(E):
a, b, d = (int(n) for n in get_input().strip().split())
g[a].append((d, b))
g[b].append((d, a))
dest = [int(n) for n in get_input().strip().split()]
src = [int(n) for n in get_input().strip().split()]
ans.append(min_dist(g, dest, src))
for n in ans:
print(n)
|
[
"[email protected]"
] | |
d1772e7e5ce09349017f1c2dd30cdfbab93383ed
|
977396938e6a077423276eda152d4541578eb527
|
/migrations/versions/f9155326f52d_.py
|
6b91c18d0de33f955ebc4eeda030c79a1e03e91c
|
[] |
no_license
|
Areum0921/web_pybo
|
688c741a5a8b5fa3d8df51f058c7ec0a8288ae91
|
0c830eda270dbbe3257e3458af4576b38d5dbaa8
|
refs/heads/master
| 2023-06-19T06:40:41.327188 | 2021-07-16T02:29:34 | 2021-07-16T02:29:34 | 355,765,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,375 |
py
|
"""empty message
Revision ID: f9155326f52d
Revises: 5496eea3137d
Create Date: 2021-03-29 14:31:37.557367
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'f9155326f52d'
down_revision = '5496eea3137d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('_alembic_tmp_answer')
with op.batch_alter_table('answer', schema=None) as batch_op:
batch_op.alter_column('ip',
existing_type=sa.VARCHAR(length=50),
nullable=False)
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_id', sa.Integer(), server_default='1', nullable=True))
batch_op.create_foreign_key(batch_op.f('fk_question_user_id_user'), 'user', ['user_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_unique_constraint(batch_op.f('uq_user_email'), ['email'])
batch_op.create_unique_constraint(batch_op.f('uq_user_username'), ['username'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f('uq_user_username'), type_='unique')
batch_op.drop_constraint(batch_op.f('uq_user_email'), type_='unique')
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f('fk_question_user_id_user'), type_='foreignkey')
batch_op.drop_column('user_id')
with op.batch_alter_table('answer', schema=None) as batch_op:
batch_op.alter_column('ip',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.create_table('_alembic_tmp_answer',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('question_id', sa.INTEGER(), nullable=True),
sa.Column('content', sa.TEXT(), nullable=False),
sa.Column('create_date', sa.DATETIME(), nullable=False),
sa.Column('ip', sa.VARCHAR(length=50), nullable=False),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
3720c2cfb59920028d138cfe49a9a780696b3a31
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03804/s226430328.py
|
40ea2fbbe5322fb1e9b734e857d574fcafee112b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 532 |
py
|
N, M = map(int, input().split())
A = [input() for _ in range(N)]
B = [input() for _ in range(M)]
for i in range(N - M + 1):
for j in range(N - M + 1):
check = True
count = 0
for k in range(M):
if (A[i + k][j: j + M] == B[k]):
# print(A[i + k][j:j + M], B[k])
count += 1
continue
else:
check = False
break
if (check and count == M):
print('Yes')
exit()
print('No')
|
[
"[email protected]"
] | |
a3c15c175c51765051f69df3b52980e1fd7a3f0a
|
e3ec7260806c1e2b045a0de93a150a5c3fc1b9df
|
/test_sin.py
|
cfd93e45de3207cebfeb5d1bfd66b21c78b149ef
|
[
"Apache-2.0"
] |
permissive
|
FearFactor1/SPA
|
58a21c9ec7a72a78f5ff50214e58faac43a3059d
|
a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7
|
refs/heads/master
| 2021-07-07T04:25:12.525595 | 2020-11-16T14:35:33 | 2020-11-16T14:35:33 | 204,684,720 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
from selenium import webdriver
import time
from selenium.webdriver.support.ui import Select
link = "http://suninjuly.github.io/selects2.html"
#nav__item-auto > a
try:
browser = webdriver.Chrome()
browser.get(link)
num1 = browser.find_element_by_css_selector("#num1").text
num2 = browser.find_element_by_css_selector("#num2").text
sum = int(num1) + int(num2)
select = Select(browser.find_element_by_tag_name("select"))
select.select_by_value(str(sum))
button = browser.find_element_by_xpath("//*/button[contains(text(), 'Submit')]")
button.click()
finally:
# успеваем скопировать код за 30 секунд
time.sleep(30)
# закрываем браузер после всех манипуляций
browser.quit()
|
[
"[email protected]"
] | |
e514273c815a754c9cc94115339b8f0ab6c3c284
|
58e8117c418a8931d56fe57ba0edd38214c79642
|
/ate/context.py
|
ed59e79081a32350eef9099c194c3270b9fc91da
|
[
"MIT"
] |
permissive
|
229051923/ApiTestEngine
|
90b495ee7be037a35f63ffa3b2c95f6ba24f43ce
|
4a529068f451c8880681286518ff6a643ecf9067
|
refs/heads/master
| 2021-01-01T16:09:10.737245 | 2017-07-19T15:29:29 | 2017-07-19T15:29:29 | 97,780,015 | 2 | 0 | null | 2017-07-20T02:09:36 | 2017-07-20T02:09:36 | null |
UTF-8
|
Python
| false | false | 7,621 |
py
|
import copy
import importlib
import re
import types
from collections import OrderedDict
from ate import exception, testcase, utils
def is_function(tup):
""" Takes (name, object) tuple, returns True if it is a function.
"""
name, item = tup
return isinstance(item, types.FunctionType)
class Context(object):
""" Manages context functions and variables.
context has two levels, testset and testcase.
"""
def __init__(self):
self.testset_config = {}
self.testset_shared_variables_mapping = dict()
self.testcase_config = {}
self.testcase_variables_mapping = dict()
self.init_context()
def init_context(self, level='testset'):
"""
testset level context initializes when a file is loaded,
testcase level context initializes when each testcase starts.
"""
if level == "testset":
self.testset_config["functions"] = {}
self.testset_config["variables"] = OrderedDict()
self.testset_config["request"] = {}
self.testset_shared_variables_mapping = {}
self.testcase_config["functions"] = {}
self.testcase_config["variables"] = OrderedDict()
self.testcase_config["request"] = {}
self.testcase_variables_mapping = copy.deepcopy(self.testset_shared_variables_mapping)
def import_requires(self, modules):
""" import required modules dynamicly
"""
for module_name in modules:
globals()[module_name] = importlib.import_module(module_name)
def bind_functions(self, function_binds, level="testcase"):
""" Bind named functions within the context
This allows for passing in self-defined functions in testing.
e.g. function_binds:
{
"add_one": lambda x: x + 1, # lambda function
"add_two_nums": "lambda x, y: x + y" # lambda function in string
}
"""
eval_function_binds = {}
for func_name, function in function_binds.items():
if isinstance(function, str):
function = eval(function)
eval_function_binds[func_name] = function
self.__update_context_config(level, "functions", eval_function_binds)
def import_module_functions(self, modules, level="testcase"):
""" import modules and bind all functions within the context
"""
for module_name in modules:
imported = importlib.import_module(module_name)
imported_functions_dict = dict(filter(is_function, vars(imported).items()))
self.__update_context_config(level, "functions", imported_functions_dict)
def register_variables_config(self, variable_binds, level="testcase"):
""" register variable configs
@param (list) variable_binds, variable can be value or custom function
e.g.
[
{"TOKEN": "debugtalk"},
{"random": "${gen_random_string(5)}"},
{"json": {'name': 'user', 'password': '123456'}},
{"md5": "${gen_md5($TOKEN, $json, $random)}"}
]
"""
if level == "testset":
for variable_bind in variable_binds:
self.testset_config["variables"].update(variable_bind)
elif level == "testcase":
self.testcase_config["variables"] = copy.deepcopy(self.testset_config["variables"])
for variable_bind in variable_binds:
self.testcase_config["variables"].update(variable_bind)
def register_request(self, request_dict, level="testcase"):
self.__update_context_config(level, "request", request_dict)
def __update_context_config(self, level, config_type, config_mapping):
"""
@param level: testset or testcase
@param config_type: functions, variables or request
@param config_mapping: functions config mapping or variables config mapping
"""
if level == "testset":
self.testset_config[config_type].update(config_mapping)
elif level == "testcase":
self.testcase_config[config_type].update(config_mapping)
def get_parsed_request(self):
""" get parsed request, with each variable replaced by bind value.
testcase request shall inherit from testset request configs,
but can not change testset configs, that's why we use copy.deepcopy here.
"""
testcase_request_config = utils.deep_update_dict(
copy.deepcopy(self.testset_config["request"]),
self.testcase_config["request"]
)
parsed_request = testcase.parse_template(
testcase_request_config,
self._get_evaluated_testcase_variables()
)
return parsed_request
def bind_extracted_variables(self, variables_mapping):
""" bind extracted variable to current testcase context and testset context.
since extracted variable maybe used in current testcase and next testcases.
"""
self.testset_shared_variables_mapping.update(variables_mapping)
self.testcase_variables_mapping.update(variables_mapping)
def get_testcase_variables_mapping(self):
return self.testcase_variables_mapping
def _get_evaluated_testcase_variables(self):
""" variables in variables_config will be evaluated each time
"""
testcase_functions_config = copy.deepcopy(self.testset_config["functions"])
testcase_functions_config.update(self.testcase_config["functions"])
self.testcase_config["functions"] = testcase_functions_config
testcase_variables_config = copy.deepcopy(self.testset_config["variables"])
testcase_variables_config.update(self.testcase_config["variables"])
self.testcase_config["variables"] = testcase_variables_config
for var_name, var_value in self.testcase_config["variables"].items():
self.testcase_variables_mapping[var_name] = self.get_eval_value(var_value)
return self.testcase_variables_mapping
def get_eval_value(self, data):
""" evaluate data recursively, each variable in data will be evaluated.
"""
if isinstance(data, (list, tuple)):
return [self.get_eval_value(item) for item in data]
if isinstance(data, dict):
evaluated_data = {}
for key, value in data.items():
evaluated_data[key] = self.get_eval_value(value)
return evaluated_data
if isinstance(data, (int, float)):
return data
# data is in string format here
data = "" if data is None else data.strip()
if utils.is_variable(data):
# variable marker: $var
variable_name = utils.parse_variable(data)
value = self.testcase_variables_mapping.get(variable_name)
if value is None:
raise exception.ParamsError(
"%s is not defined in bind variables!" % variable_name)
return value
elif utils.is_functon(data):
# function marker: ${func(1, 2, a=3, b=4)}
fuction_meta = utils.parse_function(data)
func_name = fuction_meta['func_name']
args = fuction_meta.get('args', [])
kwargs = fuction_meta.get('kwargs', {})
args = self.get_eval_value(args)
kwargs = self.get_eval_value(kwargs)
return self.testcase_config["functions"][func_name](*args, **kwargs)
else:
return data
|
[
"[email protected]"
] | |
a6db9d0ebe8e9a8f0ab0a1cacff578441a2234ba
|
4fcfc6834f598954e069e9481e4f69d6f7205f3b
|
/Week1/day_3/Flask_Intro/first_flask_app/server.py
|
a7693151c6981a66648352d4cf8857f87de47de7
|
[] |
no_license
|
sadieBoBadie/jan_2020_python_stack
|
dadc77a8a76fd4b900bc31ad83ed4680a0802aa1
|
b91da3da23ea57a27c086075f4b86c5bfec413d0
|
refs/heads/main
| 2023-03-02T22:27:46.877290 | 2021-01-26T19:04:57 | 2021-01-26T19:04:57 | 326,852,429 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 543 |
py
|
from flask import Flask, render_template

# WSGI application object; __name__ lets Flask locate templates/ and static/.
app = Flask(__name__)


@app.route('/')
def puppies():
    """Root route: return a hard-coded HTML greeting."""
    return "<h1 style='color: red'>Puppies are cute!</h1>"


# Three stacked routes map onto one view function.  Missing URL segments fall
# back to the keyword defaults below (color="blue", num=5); the <int:num>
# converter turns the last segment into an int before the view is called.
@app.route('/<animal>/<color>')
@app.route('/<animal>')
@app.route('/<animal>/<color>/<int:num>')
def display_animal(animal, color="blue", num=5):
    """Log the captured URL parameters and render index.html with them."""
    print(f"Animal: {animal}")
    print(f"Color: {color}")
    print("Type of the num var: ", type(num))  # int when <int:num> matched
    return render_template('index.html', animal=animal, color=color, num=num)


if __name__=="__main__":
    # debug=True enables the auto-reloader and interactive debugger (dev only).
    app.run(debug=True)
|
[
"[email protected]"
] | |
3a7c1d7adfb59f00b11ae77e1d37b1885d33f881
|
d1ad7bfeb3f9e3724f91458277284f7d0fbe4b2d
|
/react/003-react-django-justdjango/backend/env/bin/sqlformat
|
b08eaac3345a9fc3b0a7dbb48e6607276b57395a
|
[] |
no_license
|
qu4ku/tutorials
|
01d2d5a3e8740477d896476d02497d729a833a2b
|
ced479c5f81c8aff0c4c89d2a572227824445a38
|
refs/heads/master
| 2023-03-10T20:21:50.590017 | 2023-03-04T21:57:08 | 2023-03-04T21:57:08 | 94,262,493 | 0 | 0 | null | 2023-01-04T21:37:16 | 2017-06-13T22:07:54 |
PHP
|
UTF-8
|
Python
| false | false | 307 |
#!/Users/kamilwroniewicz/_code/_github/_tutorials/react/003-react-django-justdjango/backend/env/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the `sqlformat` command;
# it simply delegates to sqlparse's CLI entry point.
import re
import sys

from sqlparse.__main__ import main

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" / ".exe" so argv[0] shows the bare
    # command name in usage/error messages (Windows launcher convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | ||
a1df8914c35f5e949416165a0782c85926e4e9f7
|
2cf9f165cb4d6e8e9009d74b43020fe2d5c1964f
|
/chat/migrations/0001_initial.py
|
a2db3bb2cfb40b358f72ef4113b611bc648018b0
|
[] |
no_license
|
jimy1824/chat
|
29f5039c6284083b8328502932795bee586dec14
|
627ad4678c6215d37322737b38b3e5eb6d69696f
|
refs/heads/master
| 2023-04-27T15:10:51.316824 | 2019-11-08T05:00:12 | 2019-11-08T05:00:12 | 220,081,959 | 1 | 0 | null | 2023-04-21T20:40:47 | 2019-11-06T20:17:20 |
HTML
|
UTF-8
|
Python
| false | false | 1,194 |
py
|
# Generated by Django 2.2.6 on 2019-11-06 19:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `Chat` message table."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Depends on whatever model the project configures as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                # Audit timestamps: created once / refreshed on every save.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('message', models.TextField()),
                # Both ends of the conversation point at the user model;
                # distinct related_names keep the reverse accessors apart.
                ('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sender', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"[email protected]"
] | |
f64c6ffb584cd043d80268c613a23fadf9f3d960
|
d0e268862f359bbeec426b00a0c45788f6fb0b4e
|
/lesson22-优化小实例/main.py
|
10fefdf9bbed196814d0bc20e94497752dbfa13d
|
[] |
no_license
|
jpegbert/PyTorch
|
f87c2e38572c51842785de5ed1b39bb641402ac6
|
482421c76a093312ffeff7e5af4ecd3ab0cdcf30
|
refs/heads/master
| 2023-08-27T03:56:26.883297 | 2021-11-08T06:03:30 | 2021-11-08T06:03:30 | 326,677,679 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 853 |
py
|
import numpy as np
from matplotlib import pyplot as plt
import torch
def himmelblau(x):
    """Himmelblau's function f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2.

    Works elementwise on anything supporting arithmetic and [0]/[1] indexing
    (plain numbers in a list, numpy arrays, torch tensors).
    """
    left = x[0] ** 2 + x[1] - 11
    right = x[0] + x[1] ** 2 - 7
    return left ** 2 + right ** 2
# --- Visualise the surface -------------------------------------------------
# Sample the function on a 120x120 grid over [-6, 6)^2.
x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x,y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X,Y maps:', X.shape, Y.shape)
Z = himmelblau([X, Y])

fig = plt.figure('himmelblau')
# `fig.gca(projection='3d')` was deprecated in matplotlib 3.4 and removed
# later; add_subplot is the supported way to request a 3-D axes.
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()

# --- Minimise with Adam ----------------------------------------------------
# Alternative starting points to explore the four global minima:
# [1., 0.], [-4, 0.], [4, 0.]
x = torch.tensor([-4., 0.], requires_grad=True)
print(x)
optimizer = torch.optim.Adam([x], lr=1e-3)
for step in range(20000):
    pred = himmelblau(x)

    # Standard step: clear stale grads, backprop, apply the update.
    optimizer.zero_grad()
    pred.backward()
    optimizer.step()

    if step % 2000 == 0:
        print('step {}: x = {}, f(x) = {}'.format(step, x.tolist(), pred.item()))
|
[
"[email protected]"
] | |
035504981c5c4ce873430a3439ea302e21244885
|
53c3462ff265b6273f4a4fa17f6d59688f69def0
|
/剑指offer/65_hasPath.py
|
fc4f3bf5633e263578be82adeacb409feea73759
|
[] |
no_license
|
17764591637/jianzhi_offer
|
b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4
|
27e420ee302d5ab6512ecfdb8d469b043fb7102d
|
refs/heads/master
| 2023-08-03T01:32:51.588472 | 2019-10-13T07:56:21 | 2019-10-13T07:56:21 | 197,692,548 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,300 |
py
|
'''
请设计一个函数,用来判断在一个矩阵中是否存在一条包含某字符串所有字符的路径。
路径可以从矩阵中的任意一个格子开始,每一步可以在矩阵中向左,向右,向上,向下移动一个格子。
如果一条路径经过了矩阵中的某一个格子,则之后不能再次进入这个格子。
例如 a b c e s f c s a d e e 这样的3 X 4 矩阵中包含一条字符串"bcced"的路径,但是矩阵中不包含"abcb"路径,
因为字符串的第一个字符b占据了矩阵中的第一行第二个格子之后,路径不能再次进入该格子。
分析:回溯算法
这是一个可以用回朔法解决的典型题。首先,在矩阵中任选一个格子作为路径的起点。如果路径上的第i个字符不是ch,
那么这个格子不可能处在路径上的第i个位置。如果路径上的第i个字符正好是ch,那么往相邻的格子寻找路径上的第i+1个字符。
除在矩阵边界上的格子之外,其他格子都有4个相邻的格子。重复这个过程直到路径上的所有字符都在矩阵中找到相应的位置。
由于回朔法的递归特性,路径可以被开成一个栈。当在矩阵中定位了路径中前n个字符的位置之后,在与第n个字符对应的格子
的周围都没有找到第n+1个字符,这个时候只要在路径上回到第n-1个字符,重新定位第n个字符。
由于路径不能重复进入矩阵的格子,还需要定义和字符矩阵大小一样的布尔值矩阵,用来标识路径是否已经进入每个格子。
当矩阵中坐标为(row,col)的格子和路径字符串中相应的字符一样时,从4个相邻的格子(row,col-1),(row-1,col),
(row,col+1)以及(row+1,col)中去定位路径字符串中下一个字符如果4个相邻的格子都没有匹配字符串中下一个的字符,
表明当前路径字符串中字符在矩阵中的定位不正确,我们需要回到前一个,然后重新定位。一直重复这个过程,
直到路径字符串上所有字符都在矩阵中找到合适的位置。
'''
class Solution:
    """Matrix path search (JianZhi Offer #65) with proper backtracking.

    The original `find` used an elif chain: it committed to the FIRST
    neighbouring cell that matched the next character and never tried the
    other directions, and it never un-marked visited cells — so valid paths
    were missed whenever the greedy first choice dead-ended (e.g. board
    'abxbxxcxx' 3x3 with path 'abc').
    """

    def hasPath(self, matrix, rows, cols, path):
        """Return True if `path` can be traced through the rows*cols board
        `matrix` (flat sequence, row-major) moving up/down/left/right without
        revisiting a cell."""
        if not path:            # empty path is trivially present
            return True
        board = list(matrix)    # mutable copy; find() restores cells it marks
        for i in range(rows):
            for j in range(cols):
                if board[i * cols + j] == path[0] and \
                        self.find(board, rows, cols, path[1:], i, j):
                    return True
        return False

    def find(self, matrix, rows, cols, path, i, j):
        """Cell (i, j) already matched; try to match the remaining `path`
        from its four neighbours.  Backtracks: the visited mark is removed
        before returning False so other branches can reuse the cell."""
        if not path:
            return True
        saved = matrix[i * cols + j]
        matrix[i * cols + j] = '\0'   # '\0' cannot collide with board chars
        for ni, nj in ((i, j + 1), (i, j - 1), (i + 1, j), (i - 1, j)):
            if 0 <= ni < rows and 0 <= nj < cols \
                    and matrix[ni * cols + nj] == path[0] \
                    and self.find(matrix, rows, cols, path[1:], ni, nj):
                return True
        matrix[i * cols + j] = saved  # un-mark: essential for backtracking
        return False
# Quick demo on the 3x4 example board from the problem statement.
demo_board = ['a', 'b', 'c', 'e', 's', 'f', 'c', 's', 'a', 'd', 'e', 'e']
solver = Solution()
print(solver.hasPath(demo_board, 3, 4, 'bcced'))
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.