Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Dtype | Values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
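Below is a minimal, illustrative sketch of how rows with this schema could be loaded for inspection. It assumes the dump is available as a Hugging Face `datasets`-style JSON/JSONL export; the file name used here is a placeholder, not part of the dump.

```python
from datasets import load_dataset  # assumes the `datasets` library is installed

# Placeholder file name: point this at the actual export of the rows shown below.
ds = load_dataset("json", data_files="code_dump.jsonl", split="train")

row = ds[0]
print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
print(row["content"][:200])  # first 200 characters of the stored source file
```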
794185a23fe0f8cbebee3a61f2f3486d4411e7f5 | 368 | py | Python | env/Lib/site-packages/jsonify/convert.py | iamagoogler/crud_flask | 42280fd33b7c34ee3bb24cf3fe6e0f830a8408ef | [
"MIT"
] | 20 | 2018-05-08T20:41:48.000Z | 2019-08-15T02:15:58.000Z | env/Lib/site-packages/jsonify/convert.py | iamagoogler/crud_flask | 42280fd33b7c34ee3bb24cf3fe6e0f830a8408ef | [
"MIT"
] | 7 | 2018-11-15T17:45:06.000Z | 2018-11-15T17:45:30.000Z | env/Lib/site-packages/jsonify/convert.py | iamagoogler/crud_flask | 42280fd33b7c34ee3bb24cf3fe6e0f830a8408ef | [
"MIT"
] | 5 | 2018-07-03T03:15:01.000Z | 2020-09-10T06:30:27.000Z | #!/usr/bin/python
import csv
import json
def jsonify(csv_name):
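    """Convert the CSV file at ``csv_name`` into a ``.json`` file with the same base name."""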
with open(csv_name, 'r') as f:
data = []
reader = csv.DictReader(f)
jsonf = open(csv_name[:len(csv_name)-3] + 'json', 'w')
for row in reader:
data.append(row)
jsonf.write(json.dumps(data, sort_keys=True, indent=4))
jsonf.close()
| 23 | 63 | 0.5625 |
794186525f942776c1a1fcc59335efc9bca31ace | 381 | py | Python | taobao-tianmao/top/api/rest/AlibabaWdkMarketingItembuygiftQueryactivityRequest.py | ScottLeeF/python-example | 0b230ba80fe5020d70329a9d73e058013f0ca111 | [
"Apache-2.0"
] | null | null | null | taobao-tianmao/top/api/rest/AlibabaWdkMarketingItembuygiftQueryactivityRequest.py | ScottLeeF/python-example | 0b230ba80fe5020d70329a9d73e058013f0ca111 | [
"Apache-2.0"
] | 7 | 2021-03-19T02:12:42.000Z | 2022-03-12T00:25:28.000Z | taobao-tianmao/top/api/rest/AlibabaWdkMarketingItembuygiftQueryactivityRequest.py | ScottLeeF/python-example | 0b230ba80fe5020d70329a9d73e058013f0ca111 | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2019.04.15
'''
from top.api.base import RestApi
class AlibabaWdkMarketingItembuygiftQueryactivityRequest(RestApi):
def __init__(self, domain='gw.api.taobao.com', port=80):
RestApi.__init__(self, domain, port)
self.param = None
def getapiname(self):
return 'alibaba.wdk.marketing.itembuygift.queryactivity'
| 27.214286 | 67 | 0.695538 |
794186b504eb4000bc02a16f76e3f7226d86eaa2 | 2,175 | py | Python | FusionIIIT/applications/research_procedures/views.py | abhishek-geek/Fusion | b4ba6bf3ff1ee836489ed5686855598d1021bbb8 | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/research_procedures/views.py | abhishek-geek/Fusion | b4ba6bf3ff1ee836489ed5686855598d1021bbb8 | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/research_procedures/views.py | abhishek-geek/Fusion | b4ba6bf3ff1ee836489ed5686855598d1021bbb8 | [
"bzip2-1.0.6"
] | 1 | 2021-09-13T04:39:49.000Z | 2021-09-13T04:39:49.000Z | from django.shortcuts import render,HttpResponse
from django.contrib import messages
from applications.research_procedures.models import Patent
from applications.academic_information.models import Student
from applications.globals.models import ExtraInfo, HoldsDesignation, Designation
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
# Faculty can file patent and view status of it.
def IPR(request):
user=request.user
extrainfo=ExtraInfo.objects.get(user=user)
holds_designations = HoldsDesignation.objects.filter(user=user)
desig = holds_designations
pat=Patent()
context={}
if request.method=='POST':
if(extrainfo.user_type == "faculty"):
pat.faculty_id=extrainfo
pat.title=request.POST.get('title')
pat.ipd_form=request.FILES['file1']
file1=request.FILES['file1']
fs=FileSystemStorage()
name1=fs.save(file1.name,file1)
pat.file1=fs.url(name1)
pat.project_details=request.FILES['file2']
file2=request.FILES['file2']
fs=FileSystemStorage()
name2=fs.save(file2.name,file2)
pat.file2=fs.url(name2)
pat.status='Pending'
pat.save()
pat=Patent.objects.all()
context['pat']=pat
context['use']=extrainfo
context['desig']=desig
return render(request ,"rs/research.html",context)
#dean_rspc can update status of patent.
def update(request):
user=request.user
extrainfo=ExtraInfo.objects.get(user=user)
holds_designations = HoldsDesignation.objects.filter(user=user)
desig = holds_designations
pat=Patent()
if request.method=='POST':
if(desig.exists()):
if(desig.first().designation.name == "dean_rspc" and extrainfo.user_type == "faculty"):
iid=request.POST.get('id')
pat=Patent.objects.get(application_id=iid)
pat.status=request.POST.get('status')
pat.save()
pat=Patent.objects.all()
return render(request ,"rs/research.html",{'pat':pat,'use':extrainfo,'desig':desig})
| 36.25 | 99 | 0.663908 |
794186cbe62660c26e4873cb7848a545d309bb44 | 11,459 | py | Python | AFRTest.py | funiuShop/face | 4723c30a785a11c1b197036dc81e0b94c1d70399 | [
"FSFAP"
] | null | null | null | AFRTest.py | funiuShop/face | 4723c30a785a11c1b197036dc81e0b94c1d70399 | [
"FSFAP"
] | null | null | null | AFRTest.py | funiuShop/face | 4723c30a785a11c1b197036dc81e0b94c1d70399 | [
"FSFAP"
] | null | null | null | # coding=utf-8
from arcsoft import CLibrary, ASVL_COLOR_FORMAT, ASVLOFFSCREEN, c_ubyte_p, FaceInfo
from arcsoft.utils import BufferInfo, ImageLoader
from arcsoft.AFD_FSDKLibrary import *
from arcsoft.AFR_FSDKLibrary import *
from ctypes import *
import traceback
APPID = c_char_p(b'xTaxfVjCxaLESdmx2czqfa6hK4YEwzmJLDo7M1ZvbAZ')
FD_SDKKEY = c_char_p(b'HgLo4dmftp1tQ8SFxygB4Lk4ufMvZNemgShBWKwQJ5Nf')
FR_SDKKEY = c_char_p(b'HgLo4dmftp1tQ8SFxygB4LkZZGQeorZcz4w6TQQNaJEa')
FD_WORKBUF_SIZE = 20 * 1024 * 1024
FR_WORKBUF_SIZE = 40 * 1024 * 1024
MAX_FACE_NUM = 50
bUseYUVFile = 0
def doFaceDetection(hFDEngine, inputImg):
faceInfo = []
pFaceRes = POINTER(AFD_FSDK_FACERES)()
ret = AFD_FSDK_StillImageFaceDetection(hFDEngine, byref(inputImg),
byref(pFaceRes))
if ret != 0:
print(u'AFD_FSDK_StillImageFaceDetection 0x{0:x}'.format(ret))
return faceInfo
faceRes = pFaceRes.contents
if faceRes.nFace > 0:
for i in range(0, faceRes.nFace):
rect = faceRes.rcFace[i]
orient = faceRes.lfaceOrient[i]
faceInfo.append(
FaceInfo(rect.left, rect.top, rect.right, rect.bottom, orient))
return faceInfo
def extractFRFeature(hFREngine, inputImg, faceInfo):
"""
    Extract the face feature from one detected face.
"""
faceinput = AFR_FSDK_FACEINPUT()
faceinput.lOrient = faceInfo.orient
faceinput.rcFace.left = faceInfo.left
faceinput.rcFace.top = faceInfo.top
faceinput.rcFace.right = faceInfo.right
faceinput.rcFace.bottom = faceInfo.bottom
faceFeature = AFR_FSDK_FACEMODEL()
ret = AFR_FSDK_ExtractFRFeature(hFREngine, inputImg, faceinput,
faceFeature)
if ret != 0:
print(u'AFR_FSDK_ExtractFRFeature ret 0x{0:x}'.format(ret))
return None
try:
return faceFeature.deepCopy()
except Exception as e:
traceback.print_exc()
print(e.message)
return None
def compareFaceSimilarity(hFDEngine, hFREngine, inputImgA, inputImgB):
"""
    Compare the similarity of the faces in two images.
"""
# Do Face Detect
faceInfosA = doFaceDetection(hFDEngine, inputImgA)
if len(faceInfosA) < 1:
print(u'no face in Image A ')
return 0.0
faceInfosB = doFaceDetection(hFDEngine, inputImgB)
if len(faceInfosB) < 1:
print(u'no face in Image B ')
return 0.0
# Extract Face Feature
faceFeatureA = extractFRFeature(hFREngine, inputImgA, faceInfosA[0])
if faceFeatureA == None:
        print(u'extract face feature in Image A failed')
return 0.0
faceFeatureB = extractFRFeature(hFREngine, inputImgB, faceInfosB[0])
if faceFeatureB == None:
print(u'extract face feature in Image B failed')
faceFeatureA.freeUnmanaged()
return 0.0
# calc similarity between faceA and faceB
fSimilScore = c_float(0.0)
ret = AFR_FSDK_FacePairMatching(hFREngine, faceFeatureA, faceFeatureB,
byref(fSimilScore))
faceFeatureA.freeUnmanaged()
faceFeatureB.freeUnmanaged()
if ret != 0:
print(u'AFR_FSDK_FacePairMatching failed:ret 0x{0:x}'.format(ret))
return 0.0
return fSimilScore
def loadYUVImage(yuv_filePath, yuv_width, yuv_height, yuv_format):
"""
    Load a YUV image.
"""
yuv_rawdata_size = 0
inputImg = ASVLOFFSCREEN()
inputImg.u32PixelArrayFormat = yuv_format
inputImg.i32Width = yuv_width
inputImg.i32Height = yuv_height
if ASVL_COLOR_FORMAT.ASVL_PAF_I420 == inputImg.u32PixelArrayFormat:
inputImg.pi32Pitch[0] = inputImg.i32Width
inputImg.pi32Pitch[1] = inputImg.i32Width // 2
inputImg.pi32Pitch[2] = inputImg.i32Width // 2
yuv_rawdata_size = inputImg.i32Width * inputImg.i32Height * 3 // 2
elif ASVL_COLOR_FORMAT.ASVL_PAF_NV12 == inputImg.u32PixelArrayFormat:
inputImg.pi32Pitch[0] = inputImg.i32Width
inputImg.pi32Pitch[1] = inputImg.i32Width
yuv_rawdata_size = inputImg.i32Width * inputImg.i32Height * 3 // 2
elif ASVL_COLOR_FORMAT.ASVL_PAF_NV21 == inputImg.u32PixelArrayFormat:
inputImg.pi32Pitch[0] = inputImg.i32Width
inputImg.pi32Pitch[1] = inputImg.i32Width
yuv_rawdata_size = inputImg.i32Width * inputImg.i32Height * 3 // 2
elif ASVL_COLOR_FORMAT.ASVL_PAF_YUYV == inputImg.u32PixelArrayFormat:
inputImg.pi32Pitch[0] = inputImg.i32Width * 2
yuv_rawdata_size = inputImg.i32Width * inputImg.i32Height * 2
else:
print(u'unsupported yuv format')
exit(0)
# load YUV Image Data from File
f = None
try:
f = open(yuv_filePath, u'rb')
imagedata = f.read(yuv_rawdata_size)
except Exception as e:
traceback.print_exc()
print(e.message)
exit(0)
finally:
if f is not None:
f.close()
if ASVL_COLOR_FORMAT.ASVL_PAF_I420 == inputImg.u32PixelArrayFormat:
inputImg.ppu8Plane[0] = cast(imagedata, c_ubyte_p)
inputImg.ppu8Plane[1] = cast(
addressof(inputImg.ppu8Plane[0].contents) +
(inputImg.pi32Pitch[0] * inputImg.i32Height), c_ubyte_p)
inputImg.ppu8Plane[2] = cast(
addressof(inputImg.ppu8Plane[1].contents) +
(inputImg.pi32Pitch[1] * inputImg.i32Height // 2), c_ubyte_p)
inputImg.ppu8Plane[3] = cast(0, c_ubyte_p)
elif ASVL_COLOR_FORMAT.ASVL_PAF_NV12 == inputImg.u32PixelArrayFormat:
inputImg.ppu8Plane[0] = cast(imagedata, c_ubyte_p)
inputImg.ppu8Plane[1] = cast(
addressof(inputImg.ppu8Plane[0].contents) +
(inputImg.pi32Pitch[0] * inputImg.i32Height), c_ubyte_p)
inputImg.ppu8Plane[2] = cast(0, c_ubyte_p)
inputImg.ppu8Plane[3] = cast(0, c_ubyte_p)
elif ASVL_COLOR_FORMAT.ASVL_PAF_NV21 == inputImg.u32PixelArrayFormat:
inputImg.ppu8Plane[0] = cast(imagedata, c_ubyte_p)
inputImg.ppu8Plane[1] = cast(
addressof(inputImg.ppu8Plane[0].contents) +
(inputImg.pi32Pitch[0] * inputImg.i32Height), c_ubyte_p)
inputImg.ppu8Plane[2] = cast(0, c_ubyte_p)
inputImg.ppu8Plane[3] = cast(0, c_ubyte_p)
elif ASVL_COLOR_FORMAT.ASVL_PAF_YUYV == inputImg.u32PixelArrayFormat:
inputImg.ppu8Plane[0] = cast(imagedata, c_ubyte_p)
inputImg.ppu8Plane[1] = cast(0, c_ubyte_p)
inputImg.ppu8Plane[2] = cast(0, c_ubyte_p)
inputImg.ppu8Plane[3] = cast(0, c_ubyte_p)
else:
print(u'unsupported yuv format')
exit(0)
inputImg.gc_ppu8Plane0 = imagedata
return inputImg
def loadImage(filePath):
"""
    Load an image.
"""
bufferInfo = ImageLoader.getI420FromFile(filePath)
inputImg = ASVLOFFSCREEN()
inputImg.u32PixelArrayFormat = ASVL_COLOR_FORMAT.ASVL_PAF_I420
inputImg.i32Width = bufferInfo.width
inputImg.i32Height = bufferInfo.height
inputImg.pi32Pitch[0] = inputImg.i32Width
inputImg.pi32Pitch[1] = inputImg.i32Width // 2
inputImg.pi32Pitch[2] = inputImg.i32Width // 2
inputImg.ppu8Plane[0] = cast(bufferInfo.buffer, c_ubyte_p)
inputImg.ppu8Plane[1] = cast(
addressof(inputImg.ppu8Plane[0].contents) +
(inputImg.pi32Pitch[0] * inputImg.i32Height), c_ubyte_p)
inputImg.ppu8Plane[2] = cast(
addressof(inputImg.ppu8Plane[1].contents) +
(inputImg.pi32Pitch[1] * inputImg.i32Height // 2), c_ubyte_p)
inputImg.ppu8Plane[3] = cast(0, c_ubyte_p)
inputImg.gc_ppu8Plane0 = bufferInfo.buffer
return inputImg
def get_feature_test(hFDEngine, hFREngine, imgFile):
"""
    Test extracting the face feature from an image.
"""
    faceInfos = doFaceDetection(hFDEngine, imgFile)
if len(faceInfos) < 1:
print(u'no face in Image ')
return 0.0
# Extract Face Feature
faceFeature = extractFRFeature(hFREngine, imgFile, faceInfos[0])
if faceFeature == None:
        print(u'extract face feature in Image failed')
return 0.0
feature = faceFeature.toByteArray()
filename = "feature.dat"
with open(filename, "wb") as f:
f.write(feature)
print("save face feature in feature.dat")
print("feature length:", len(feature), "Byte")
faceFeature.freeUnmanaged()
if __name__ == u'__main__':
print(u'#####################################################')
# init Engine
pFDWorkMem = CLibrary.malloc(c_size_t(FD_WORKBUF_SIZE))
pFRWorkMem = CLibrary.malloc(c_size_t(FR_WORKBUF_SIZE))
hFDEngine = c_void_p()
ret = AFD_FSDK_InitialFaceEngine(
APPID, FD_SDKKEY, pFDWorkMem, c_int32(FD_WORKBUF_SIZE),
byref(hFDEngine), AFD_FSDK_OPF_0_HIGHER_EXT, 16, MAX_FACE_NUM)
if ret != 0:
CLibrary.free(pFDWorkMem)
print(u'AFD_FSDK_InitialFaceEngine ret 0x{:x}'.format(ret))
exit(0)
# print FDEngine version
versionFD = AFD_FSDK_GetVersion(hFDEngine)
print(u'AFD Version: {} {} {} {}'.format(
versionFD.contents.lCodebase, versionFD.contents.lMajor,
versionFD.contents.lMinor, versionFD.contents.lBuild))
print("Version:",
c_char_p(versionFD.contents.Version).value.decode(u'utf-8'))
print("BuildDate:",
c_char_p(versionFD.contents.BuildDate).value.decode(u'utf-8'))
print("CopyRight:",
c_char_p(versionFD.contents.CopyRight).value.decode(u'utf-8'))
hFREngine = c_void_p()
ret = AFR_FSDK_InitialEngine(APPID, FR_SDKKEY, pFRWorkMem,
c_int32(FR_WORKBUF_SIZE), byref(hFREngine))
if ret != 0:
        AFD_FSDK_UninitialFaceEngine(hFDEngine)
        CLibrary.free(pFDWorkMem)
        CLibrary.free(pFRWorkMem)
        print(u'AFR_FSDK_InitialEngine ret 0x{:x}'.format(ret))
        exit(0)
print("\r\n")
# print FREngine version
versionFR = AFR_FSDK_GetVersion(hFREngine)
print(u'AFR Version: {} {} {} {}'.format(
versionFR.contents.lCodebase, versionFR.contents.lMajor,
versionFR.contents.lMinor, versionFR.contents.lBuild))
print("Version:",
c_char_p(versionFR.contents.Version).value.decode(u'utf-8'))
print("BuildDate:",
c_char_p(versionFR.contents.BuildDate).value.decode(u'utf-8'))
print("CopyRight:",
c_char_p(versionFR.contents.CopyRight).value.decode(u'utf-8'))
# load Image Data
if bUseYUVFile:
filePathA = u'lena_I420_fromBMP.yuv'
yuv_widthA = 512
yuv_heightA = 512
yuv_formatA = ASVL_COLOR_FORMAT.ASVL_PAF_I420
filePathB = u'lena_I420_fromBMP.yuv'
yuv_widthB = 512
yuv_heightB = 512
yuv_formatB = ASVL_COLOR_FORMAT.ASVL_PAF_I420
inputImgA = loadYUVImage(filePathA, yuv_widthA, yuv_heightA,
yuv_formatA)
inputImgB = loadYUVImage(filePathB, yuv_widthB, yuv_heightB,
yuv_formatB)
else:
filePathA = u'lena.bmp'
filePathB = u'lena.bmp'
inputImgA = loadImage(filePathA)
inputImgB = loadImage(filePathB)
print(u'\r\nsimilarity between faceA and faceB is {0}'.format(
compareFaceSimilarity(hFDEngine, hFREngine, inputImgA, inputImgB)))
print("\r\n")
    # extract the face feature
get_feature_test(hFDEngine, hFREngine, inputImgA)
# release Engine
AFD_FSDK_UninitialFaceEngine(hFDEngine)
AFR_FSDK_UninitialEngine(hFREngine)
CLibrary.free(pFDWorkMem)
CLibrary.free(pFRWorkMem)
print(u'#####################################################')
| 35.042813 | 83 | 0.659831 |
7941872c0c3d8883ecfbf515734810dddccdcab0 | 871 | py | Python | EventFilter/L1TRawToDigi/python/gtStage2Raw_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | EventFilter/L1TRawToDigi/python/gtStage2Raw_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | EventFilter/L1TRawToDigi/python/gtStage2Raw_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
gtStage2Raw = cms.EDProducer(
"L1TDigiToRaw",
Setup = cms.string("stage2::GTSetup"),
# TowerInputLabel = cms.InputTag("simCaloStage2Digis"),
GtInputTag = cms.InputTag("simGtStage2Digis"),
ExtInputTag = cms.InputTag("simGtExtFakeStage2Digis"),
MuonInputTag = cms.InputTag("simGmtStage2Digis"),
EGammaInputTag = cms.InputTag("simCaloStage2Digis"),
TauInputTag = cms.InputTag("simCaloStage2Digis"),
JetInputTag = cms.InputTag("simCaloStage2Digis"),
EtSumInputTag = cms.InputTag("simCaloStage2Digis"),
FedId = cms.int32(1404),
## FWId = cms.uint32(0x10A6), # FW version in GMT with vtx-etrapolation
FWId = cms.uint32(0x10F2), # FW version for packing new HI centrality variables
lenSlinkHeader = cms.untracked.int32(8),
lenSlinkTrailer = cms.untracked.int32(8)
)
| 43.55 | 84 | 0.717566 |
7941878985381ba4a5569e2f38f02db5106607f3 | 867 | py | Python | scripts/stf/translated_values.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 413 | 2015-01-02T09:53:04.000Z | 2019-12-05T15:31:25.000Z | scripts/stf/translated_values.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 2,471 | 2015-01-02T03:33:55.000Z | 2019-12-13T17:55:10.000Z | scripts/stf/translated_values.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 296 | 2015-01-06T13:03:33.000Z | 2019-12-11T14:19:31.000Z | #!/usr/bin/env python
from sys import argv, stdin, stdout, stderr
TRANSLATED_MARKER = '------------------TRANSLATED-------------------'
UNTRANSLATED_MARKER = '------------------UNTRANSLATED-----------------'
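# Illustrative (assumed) invocation: pipe an export containing the marker lines
# above through this script; only lines from TRANSLATED sections, minus comments
# and blank lines, are written to stdout, e.g.:
#   ./translated_values.py < labels_export.txt > translated_only.txt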
lines_emitted = 0
in_translated_section = False
for line in stdin:
sline = line.strip()
if not sline:
# ignore empty lines
pass
elif sline.startswith('#'):
# ignore comments
pass
elif not in_translated_section and sline == TRANSLATED_MARKER:
in_translated_section = True
pass
elif in_translated_section and sline == UNTRANSLATED_MARKER:
in_translated_section = False
pass
elif in_translated_section:
lines_emitted += 1
stdout.write(line)
pass
stderr.write(
"{0}: {1} lines emitted\n".format(
argv[0],
lines_emitted
)
)
| 24.771429 | 71 | 0.592849 |
794188b8aa11af024e41ee937d7af88c41ea3b97 | 715 | py | Python | appengine/standard/memcache/best_practices/migration_step1/migration1_test.py | ammumaddy/sample | 3be22b92919af3d08584ee0cb849a356f63e602b | [
"Apache-2.0"
] | 1 | 2018-09-24T04:54:26.000Z | 2018-09-24T04:54:26.000Z | appengine/standard/memcache/best_practices/migration_step1/migration1_test.py | DalavanCloud/python-docs-samples | 439ca4c552940284743f5f22a590cc4b6dae1bef | [
"Apache-2.0"
] | 2 | 2021-06-10T23:54:32.000Z | 2021-06-10T23:54:33.000Z | appengine/standard/memcache/best_practices/migration_step1/migration1_test.py | DalavanCloud/python-docs-samples | 439ca4c552940284743f5f22a590cc4b6dae1bef | [
"Apache-2.0"
] | 1 | 2018-09-24T04:53:12.000Z | 2018-09-24T04:53:12.000Z | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import migration1
import webtest
def test_get(testbed):
app = webtest.TestApp(migration1.app)
app.get('/')
| 31.086957 | 74 | 0.752448 |
794188d19bba0d4175e1faaeb0fce24e1c4deb32 | 1,267 | py | Python | mabel/utils/text.py | mabel-dev/mabel | 4b06e9e5ce108e8a3267e44685fd61fc9802eb0a | [
"Apache-2.0"
] | null | null | null | mabel/utils/text.py | mabel-dev/mabel | 4b06e9e5ce108e8a3267e44685fd61fc9802eb0a | [
"Apache-2.0"
] | 287 | 2021-05-14T21:25:26.000Z | 2022-03-30T12:02:51.000Z | mabel/utils/text.py | mabel-dev/mabel | 4b06e9e5ce108e8a3267e44685fd61fc9802eb0a | [
"Apache-2.0"
] | null | null | null | import re
import string
from functools import lru_cache
VALID_CHARACTERS = string.ascii_letters + string.digits + string.whitespace
REGEX_CHARACTERS = {ch: "\\" + ch for ch in ".^$*+?{}[]|()\\"}
def tokenize(text):
text = text.lower()
text = "".join([c for c in text if c in VALID_CHARACTERS])
return text.split()
def sanitize(text, safe_characters: str = VALID_CHARACTERS):
return "".join([c for c in text if c in safe_characters])
def wrap_text(text, line_len):
from textwrap import fill
def _inner(text):
for line in text.splitlines():
yield fill(line, line_len)
return "\n".join(list(_inner(text)))
# https://codereview.stackexchange.com/a/248421
@lru_cache(4)
def _sql_like_fragment_to_regex(fragment):
"""
Allows us to accepts LIKE statements to search data
"""
# https://codereview.stackexchange.com/a/36864/229677
safe_fragment = "".join([REGEX_CHARACTERS.get(ch, ch) for ch in fragment])
return re.compile("^" + safe_fragment.replace("%", ".*?").replace("_", ".") + "$")
def like(x, y):
return _sql_like_fragment_to_regex(y.lower()).match(str(x).lower())
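# e.g. like("mabel.log", "%.log") is truthy, while like("mabel.log", "data_%") is not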
def not_like(x, y):
return not like(x, y)
def matches(x, y):
    return re.compile(y).search(x) is not None
| 24.365385 | 86 | 0.661405 |
79418a0f9d646b4ec22a628bc8a3b7d43a8c764a | 952 | py | Python | setup.py | donavanyieh/hi-kyong | f76153d6a92e6af795f79059f09129f0bc026959 | [
"MIT"
] | null | null | null | setup.py | donavanyieh/hi-kyong | f76153d6a92e6af795f79059f09129f0bc026959 | [
"MIT"
] | null | null | null | setup.py | donavanyieh/hi-kyong | f76153d6a92e6af795f79059f09129f0bc026959 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'Hi Kyong!'
LONG_DESCRIPTION = 'A pythonic way to say hi to our prof, prof Kyong. Inspired by https://github.com/tsivinsky/hi-mom.'
# Setting up
setup(
    # the name must match the package folder name
name="hikyong",
version=VERSION,
author="Yieh Yuheng",
author_email="[email protected]",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[],
keywords=['python', 'first package'],
classifiers= [
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
) | 34 | 119 | 0.60084 |
79418b170a7568cf4af71e22615733d5aeb1e9a7 | 11,510 | py | Python | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/contrib/boost.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | 1 | 2018-12-15T19:57:24.000Z | 2018-12-15T19:57:24.000Z | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/contrib/boost.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | null | null | null | REDSI_1160929_1161573/boost_1_67_0/tools/build/src/contrib/boost.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | [
"MIT"
] | 1 | 2019-03-08T11:06:22.000Z | 2019-03-08T11:06:22.000Z | # $Id: boost.jam 62249 2010-05-26 19:05:19Z steven_watanabe $
# Copyright 2008 Roland Schwarz
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Boost library support module.
#
# This module allows to use the boost library from boost-build projects.
# The location of a boost source tree or the path to a pre-built
# version of the library can be configured from either site-config.jam
# or user-config.jam. If no location is configured the module looks for
# a BOOST_ROOT environment variable, which should point to a boost source
# tree. As a last resort it tries to use pre-built libraries from the standard
# search path of the compiler.
#
# If the location to a source tree is known, the module can be configured
# from the *-config.jam files:
#
# using boost : 1.35 : <root>/path-to-boost-root ;
#
# If the location to a pre-built version is known:
#
# using boost : 1.34
# : <include>/usr/local/include/boost_1_34
# <library>/usr/local/lib
# ;
#
# It is legal to configure more than one boost library version in the config
# files. The version identifier is used to disambiguate between them.
# The first configured version becomes the default.
#
# To use a boost library you need to put a 'use' statement into your
# Jamfile:
#
# import boost ;
#
# boost.use-project 1.35 ;
#
# If you don't care about a specific version you just can omit the version
# part, in which case the default is picked up:
#
# boost.use-project ;
#
# The library can be referenced with the project identifier '/boost'. To
# reference the program_options you would specify:
#
# exe myexe : mysrc.cpp : <library>/boost//program_options ;
#
# Note that the requirements are automatically transformed into suitable
# tags to find the correct pre-built library.
#
import re
import bjam
from b2.build import alias, property, property_set, feature
from b2.manager import get_manager
from b2.tools import builtin, common
from b2.util import bjam_signature, regex
# TODO: This is currently necessary in Python Port, but was not in Jam.
feature.feature('layout', ['system', 'versioned', 'tag'], ['optional'])
feature.feature('root', [], ['optional', 'free'])
feature.feature('build-id', [], ['optional', 'free'])
__initialized = None
__boost_auto_config = property_set.create([property.Property('layout', 'system')])
__boost_configured = {}
__boost_default = None
__build_id = None
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
# Configuration of the boost library to use.
#
# This can either be a boost source tree or
# pre-built libraries. The 'version' parameter must be a valid boost
# version number, e.g. 1.35, if specifying a pre-built version with
# versioned layout. It may be a symbolic name, e.g. 'trunk' if specifying
# a source tree. The options are specified as named parameters (like
# properties). The following paramters are available:
#
# <root>/path-to-boost-root: Specify a source tree.
#
# <include>/path-to-include: The include directory to search.
#
# <library>/path-to-library: The library directory to search.
#
# <layout>system or <layout>versioned.
#
# <build-id>my_build_id: The custom build id to use.
#
def init(version, options = None):
assert(isinstance(version,list))
assert(len(version)==1)
version = version[0]
if version in __boost_configured:
get_manager().errors()("Boost {} already configured.".format(version));
else:
global __boost_default
if debug():
if not __boost_default:
print "notice: configuring default boost library {}".format(version)
print "notice: configuring boost library {}".format(version)
if not __boost_default:
__boost_default = version
properties = []
for option in options:
properties.append(property.create_from_string(option))
__boost_configured[ version ] = property_set.PropertySet(properties)
projects = get_manager().projects()
rules = projects.project_rules()
# Use a certain version of the library.
#
# The use-project rule causes the module to define a boost project of
# searchable pre-built boost libraries, or references a source tree
# of the boost library. If the 'version' parameter is omitted either
# the configured default (first in config files) is used or an auto
# configuration will be attempted.
#
@bjam_signature(([ "version", "?" ], ))
def use_project(version = None):
projects.push_current( projects.current() )
if not version:
version = __boost_default
if not version:
version = "auto_config"
global __initialized
if __initialized:
if __initialized != version:
get_manager().errors()('Attempt to use {} with different parameters'.format('boost'))
else:
if version in __boost_configured:
opts = __boost_configured[ version ]
root = opts.get('<root>' )
inc = opts.get('<include>')
lib = opts.get('<library>')
if debug():
print "notice: using boost library {} {}".format( version, opt.raw() )
global __layout
global __version_tag
__layout = opts.get('<layout>')
if not __layout:
__layout = 'versioned'
__build_id = opts.get('<build-id>')
__version_tag = re.sub("[*\\/:.\"\' ]", "_", version)
__initialized = version
if ( root and inc ) or \
( root and lib ) or \
( lib and not inc ) or \
( not lib and inc ):
get_manager().errors()("Ambiguous parameters, use either <root> or <inlude> with <library>.")
elif not root and not inc:
root = bjam.variable("BOOST_ROOT")
module = projects.current().project_module()
if root:
bjam.call('call-in-module', module, 'use-project', ['boost', root])
else:
projects.initialize(__name__)
if version == '0.0.1':
boost_0_0_1( inc, lib )
else:
boost_std( inc, lib )
else:
get_manager().errors()("Reference to unconfigured boost version.")
projects.pop_current()
rules.add_rule( 'boost.use-project', use_project )
def boost_std(inc = None, lib = None):
# The default definitions for pre-built libraries.
rules.project(
['boost'],
['usage-requirements'] + ['<include>{}'.format(i) for i in inc] + ['<define>BOOST_ALL_NO_LIB'],
['requirements'] + ['<search>{}'.format(l) for l in lib])
# TODO: There should be a better way to add a Python function into a
# project requirements property set.
tag_prop_set = property_set.create([property.Property('<tag>', tag_std)])
attributes = projects.attributes(projects.current().project_module())
attributes.requirements = attributes.requirements.refine(tag_prop_set)
alias('headers')
def boost_lib(lib_name, dyn_link_macro):
if (isinstance(lib_name,str)):
lib_name = [lib_name]
builtin.lib(lib_name, usage_requirements=['<link>shared:<define>{}'.format(dyn_link_macro)])
boost_lib('container' , 'BOOST_CONTAINER_DYN_LINK' )
boost_lib('date_time' , 'BOOST_DATE_TIME_DYN_LINK' )
boost_lib('filesystem' , 'BOOST_FILE_SYSTEM_DYN_LINK' )
boost_lib('graph' , 'BOOST_GRAPH_DYN_LINK' )
boost_lib('graph_parallel' , 'BOOST_GRAPH_DYN_LINK' )
boost_lib('iostreams' , 'BOOST_IOSTREAMS_DYN_LINK' )
boost_lib('locale' , 'BOOST_LOG_DYN_LINK' )
boost_lib('log' , 'BOOST_LOG_DYN_LINK' )
boost_lib('log_setup' , 'BOOST_LOG_DYN_LINK' )
boost_lib('math_tr1' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('math_tr1f' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('math_tr1l' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('math_c99' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('math_c99f' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('math_c99l' , 'BOOST_MATH_TR1_DYN_LINK' )
boost_lib('mpi' , 'BOOST_MPI_DYN_LINK' )
boost_lib('program_options' , 'BOOST_PROGRAM_OPTIONS_DYN_LINK')
boost_lib('python' , 'BOOST_PYTHON_DYN_LINK' )
boost_lib('python3' , 'BOOST_PYTHON_DYN_LINK' )
boost_lib('random' , 'BOOST_RANDOM_DYN_LINK' )
boost_lib('regex' , 'BOOST_REGEX_DYN_LINK' )
boost_lib('serialization' , 'BOOST_SERIALIZATION_DYN_LINK' )
boost_lib('wserialization' , 'BOOST_SERIALIZATION_DYN_LINK' )
boost_lib('signals' , 'BOOST_SIGNALS_DYN_LINK' )
boost_lib('system' , 'BOOST_SYSTEM_DYN_LINK' )
boost_lib('unit_test_framework' , 'BOOST_TEST_DYN_LINK' )
boost_lib('prg_exec_monitor' , 'BOOST_TEST_DYN_LINK' )
boost_lib('test_exec_monitor' , 'BOOST_TEST_DYN_LINK' )
boost_lib('thread' , 'BOOST_THREAD_DYN_DLL' )
boost_lib('wave' , 'BOOST_WAVE_DYN_LINK' )
def boost_0_0_1( inc, lib ):
print "You are trying to use an example placeholder for boost libs." ;
# Copy this template to another place (in the file boost.jam)
# and define a project and libraries modelled after the
# boost_std rule. Please note that it is also possible to have
# a per version taging rule in case they are different between
# versions.
def tag_std(name, type, prop_set):
name = 'boost_' + name
if 'static' in prop_set.get('<link>') and 'windows' in prop_set.get('<target-os>'):
name = 'lib' + name
result = None
if __layout == 'system':
versionRe = re.search('^([0-9]+)_([0-9]+)', __version_tag)
if versionRe and versionRe.group(1) == '1' and int(versionRe.group(2)) < 39:
result = tag_tagged(name, type, prop_set)
else:
result = tag_system(name, type, prop_set)
elif __layout == 'tagged':
result = tag_tagged(name, type, prop_set)
elif __layout == 'versioned':
result = tag_versioned(name, type, prop_set)
else:
get_manager().errors()("Missing layout")
return result
def tag_maybe(param):
return ['-{}'.format(param)] if param else []
def tag_system(name, type, prop_set):
return common.format_name(['<base>'] + tag_maybe(__build_id), name, type, prop_set)
def tag_tagged(name, type, prop_set):
return common.format_name(['<base>', '<threading>', '<runtime>'] + tag_maybe(__build_id), name, type, prop_set)
def tag_versioned(name, type, prop_set):
return common.format_name(['<base>', '<toolset>', '<threading>', '<runtime>'] + tag_maybe(__version_tag) + tag_maybe(__build_id),
name, type, prop_set)
| 40.960854 | 134 | 0.624674 |
79418b267eb3bfe8f8b125ed0b0dd52429cb3133 | 4,836 | py | Python | launchsalsa.py | Uberroot/launchsalsa | ec26cbffd5ab078e9916baf891f5eb1f44ba2d12 | [
"BSD-2-Clause"
] | 1 | 2016-05-05T19:47:59.000Z | 2016-05-05T19:47:59.000Z | launchsalsa.py | Uberroot/launchsalsa | ec26cbffd5ab078e9916baf891f5eb1f44ba2d12 | [
"BSD-2-Clause"
] | null | null | null | launchsalsa.py | Uberroot/launchsalsa | ec26cbffd5ab078e9916baf891f5eb1f44ba2d12 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python2
#https://pypi.python.org/pypi/alsaseq
import alsaseq
import time
# Button maps - These are the actual MIDI notes
UP = 91
DOWN = 92
LEFT = 93
RIGHT = 94
SESSION = 95
NOTE = 96
DEVICE = 97
USER = 98
SHIFT = 80
CLICK = 70
UNDO = 60
DELETE = 50
QUANTISE = 40
DUPLICATE = 30
DOUBLE = 20
RECORD = 10
ARM = 1
SELECT = 2
MUTE = 3
SOLO = 4
VOLUME = 5
PAN = 6
SENDS = 7
STOP = 8
# Pseudo-notes for buttons that map to arrays
PLAY = 128
GRID = 129
# MIDI types
_MIDI_CONNECT = 66
_MIDI_DISCONNECT = 67;
_MIDI_ON = 6
_MIDI_OFF = 7
_MIDI_CC = 10
_MIDI_MAFTER = 12
_MIDI_PAFTER = 8
#asla connect numbers
_devi = 0
_devo = 0
# TODO: create functions for each MIDI message
def _midiOut(port, mtype, params):
alsaseq.output((mtype, 1, 0, 253, (0, 0), (_devi, port), (_devo, 0), params))
def __b2idx(but):
r = int(but / 10)
c = int(but - r * 10)
return r, c
# TODO: separate classes for including border buttons in grid indexing
# TODO: implement screen regions for scrolling
# TODO: split screens from views (essentially sprites)
class ScreenView:
def __init__(self, rows = 8, columns = 8):
self.__rows = rows
self.__columns = columns
self._offset = (0, 0)
self.__grid = []
self.__dirty = []
for i in range(0, rows):
g = []
d = []
for i in range(0, columns):
g.append(0)
d.append(True)
self.__grid.append(g)
self.__dirty.append(d)
# TODO: THE SEMANTICS OF THIS WILL CHANGE
# TODO: Only grid updates are working
def update(self, but, val = 0, row = 0, col = 0):
if but == GRID:
self.__grid[row - 1][col - 1] = val
self.__dirty[row - 1][col - 1] = True
def scroll(self, rows, columns):
r = self._offset[0] + rows
c = self._offset[1] + columns
if r > self.__rows - 8:
r = self.__rows - 8
if c > self.__columns - 8:
c = self.__columns - 8
if r < 0:
r = 0
if c < 0:
c = 0
self._offset = (r, c)
self.redraw()
def redraw(self):
self.draw(True)
def draw(self, redraw = False):
for r in range(1, 9):
for c in range(1, 9):
grow = 9 - r - 1 + self._offset[0]
gcol = c - 1 + self._offset[1]
if redraw or self.__dirty[grow][gcol]:
_midiOut(1, _MIDI_ON, (0, r * 10 + c, self.__grid[grow][gcol], 0, 0))
self.__dirty[grow][gcol] = False
# TODO: figure out how to trigger a redraw event after all connections are made
class ScreenController:
def onButtonDown(self, but, vel, row, col):
#print("%s - %s, %s @ %s down" % (but, row, col, vel))
return
def onButtonUp(self, but, row, col):
#print("%s - %s, %s up" % (but, row, col))
return
def onMonoAftertouch(self, pressure):
#print("%s aftertouch" % (pressure))
return
def onPolyAftertouch(self, row, col, pressure):
#print("%d, %d: %s aftertouch" % (col, row, pressure))
return
# TODO: break ALSA away into a seperate module to standardize I/O (Jack on the way?)
def run(clientName, inPorts, outPorts, controller):
curScreen = controller
alsaseq.client(clientName, inPorts, outPorts, True)
initd = False
while True:
if not alsaseq.inputpending():
time.sleep(0.001)
continue
val = alsaseq.input()
mtype = val[0]
if mtype == _MIDI_CONNECT:
__devi = val[7][0]
__devo = val[7][2]
initd = True
elif not initd:
continue
elif mtype == _MIDI_CC:
but = val[7][4]
vel = val[7][5]
r,c = __b2idx(but)
if c == 9:
but = PLAY
if vel == 0:
curScreen.onButtonUp(but, 9 - r, c)
else:
curScreen.onButtonDown(but, vel, 9 - r, c)
elif mtype == _MIDI_ON:
but = val[7][1]
vel = val[7][2]
r,c = __b2idx(but)
if vel == 0:
curScreen.onButtonUp(but, 9 - r, c)
else:
curScreen.onButtonDown(GRID, vel, 9 - r, c)
elif mtype == _MIDI_OFF:
but = val[7][1]
vel = val[7][2]
r,c = __b2idx(but)
curScreen.onButtonUp(GRID, 9 - r, c)
elif mtype == _MIDI_MAFTER:
vel = val[7][5]
curScreen.onMonoAftertouch(vel)
elif mtype == _MIDI_PAFTER:
but = val[7][1]
vel = val[7][2]
r,c = __b2idx(but)
curScreen.onPolyAftertouch(9 - r, c, vel)
#else:
# print val
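# Illustrative (assumed) wiring: subclass ScreenController and hand an instance
# to run(); the client name and port counts below are placeholders.
#   class MyController(ScreenController):
#       def onButtonDown(self, but, vel, row, col):
#           print("down: %s at %s,%s (vel %s)" % (but, row, col, vel))
#   run('launchsalsa', 1, 1, MyController())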
| 26.42623 | 90 | 0.525227 |
79418bf7cb38ae8bcd7209e98e2bf559395e8240 | 6,892 | bzl | Python | repo.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | 3 | 2020-09-20T12:29:38.000Z | 2022-03-07T23:48:03.000Z | repo.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | 1 | 2021-03-30T11:26:36.000Z | 2021-03-30T11:26:36.000Z | repo.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | null | null | null | """Skylark rules to setup the WORKSPACE in the opensource bazel world."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# These dependencies are required for *developing* this project.
def _development_repositories():
# Needed by @com_google_protobuf//:protobuf_java_util
native.bind(
name = "guava",
actual = "@maven//:com_google_guava_guava",
)
# Needed by @com_google_protobuf//:protobuf_java_util
native.bind(
name = "gson",
actual = "@maven//:com_google_code_gson_gson",
)
http_archive(
name = "robolectric",
sha256 = "d4f2eb078a51f4e534ebf5e18b6cd4646d05eae9b362ac40b93831bdf46112c7",
strip_prefix = "robolectric-bazel-4.4",
urls = ["https://github.com/robolectric/robolectric-bazel/archive/4.4.tar.gz"],
)
# uncomment to test with new robolectric version. Change path to point to local filesystem
# clone of https://github.com/robolectric/robolectric-bazel
# native.local_repository(
# name = "robolectric",
# path = "~/github/robolectric-bazel/",
# )
http_archive(
name = "jsr330",
build_file_content = """
package(default_visibility = ["//visibility:public"])
java_import(
name = "jsr330",
jars = ["javax.inject.jar"],
)""",
url = "https://github.com/javax-inject/javax-inject/releases/download/1/javax.inject.zip",
)
# These dependencies are for *users* of the Android Test repo,
# i.e. specifying this repository as @androidx_test in their
# WORKSPACE using a repository_rule like git_repository or http_archive.
# Use parameter `with_dev_repositories = True` to download the dev
# repositories as well.
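# Illustrative WORKSPACE usage (assuming this file is loaded from the
# @androidx_test repository named above):
#   load("@androidx_test//:repo.bzl", "android_test_repositories")
#   android_test_repositories(with_dev_repositories = True)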
def android_test_repositories(with_dev_repositories = False):
"""Loads the workspace by downloading the required dependencies."""
if with_dev_repositories:
_development_repositories()
# Several of the repositories here end in _archive. This is due to an issue
# with the Bazel python rules that occurs if the repository name is the same
# as the top level directory of that repository. For example, suppose //:a
# depends on @b//:b and @b//:b is defined by
#
# filegroup(
# name = "b",
# srcs = ["b/c.py"],
# )
#
# Then the execroot looks like
#
# bazel-bin/
# - a
# - a.runfiles/
# - b/
# - b/
# - c.py
# - __init__.py
# - __init__.py
# - __main__/
# - a
# - a.py
# - __init__.py
#
# The Python path for a.py contains, in order, the following:
#
# [a.runfiles, a.runfiles/b, a.runfiles/__main__]
#
# If a.py contains `from b import c`, then python will find a.runfiles on
# the Python path, see that it contains both __init__.py and b/ but does not
# contain c.py and will error out.
#
# On the other hand, if we call @b//:b @b_archive//:b, then the first entry on
# the python path containing b/ is a.runfiles/b_archive which contains all of
# __init__.py, b/ and b/c.py so the import will succeed.
http_archive(
name = "google_apputils",
build_file = str(Label("//opensource:google-apputils.BUILD")),
sha256 = "47959d0651c32102c10ad919b8a0ffe0ae85f44b8457ddcf2bdc0358fb03dc29",
strip_prefix = "google-apputils-0.4.2",
url = "https://pypi.python.org/packages/69/66/a511c428fef8591c5adfa432a257a333e0d14184b6c5d03f1450827f7fe7/google-apputils-0.4.2.tar.gz",
)
http_archive(
name = "gflags_archive",
build_file = str(Label("//opensource:gflags.BUILD")),
sha256 = "3377d9dbeedb99c0325beb1f535f8fa9fa131d1d8b50db7481006f0a4c6919b4",
strip_prefix = "python-gflags-3.1.0",
url = "https://github.com/google/python-gflags/releases/download/3.1.0/python-gflags-3.1.0.tar.gz",
)
http_archive(
name = "portpicker_archive",
build_file = str(Label("//opensource:portpicker.BUILD")),
sha256 = "2f88edf7c6406034d7577846f224aff6e53c5f4250e3294b1904d8db250f27ec",
strip_prefix = "portpicker-1.1.1/src",
url = "https://pypi.python.org/packages/96/48/0e1f20fdc0b85cc8722284da3c5b80222ae4036ad73210a97d5362beaa6d/portpicker-1.1.1.tar.gz",
)
http_archive(
name = "mox_archive",
build_file = str(Label("//opensource:mox.BUILD")),
sha256 = "424ee725ee12652802b4e86571f816059b0d392401ceae70bf6487d65602cba9",
strip_prefix = "mox-0.5.3",
url = "https://pypi.python.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz#md5=6de7371e7e8bd9e2dad3fef2646f4a43",
)
# Six provides simple utilities for wrapping over differences between Python 2 and Python 3.
http_archive(
name = "six_archive",
build_file = str(Label("//opensource:six.BUILD")),
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
url = "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
)
# Needed by protobuf
native.bind(name = "six", actual = "@six_archive//:six")
# Protobuf
http_archive(
name = "com_google_protobuf",
sha256 = "d82eb0141ad18e98de47ed7ed415daabead6d5d1bef1b8cccb6aa4d108a9008f",
strip_prefix = "protobuf-b4f193788c9f0f05d7e0879ea96cd738630e5d51",
# Commit from 2019-05-15, update to protobuf 3.8 when available.
url = "https://github.com/protocolbuffers/protobuf/archive/b4f193788c9f0f05d7e0879ea96cd738630e5d51.tar.gz",
)
# Protobuf's dependencies
# Inlined protobuf's deps so we don't need users to add protobuf_deps() to their local WORKSPACE.
# From load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps").
http_archive(
name = "zlib",
build_file = "@com_google_protobuf//:third_party/zlib.BUILD",
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
urls = ["https://zlib.net/zlib-1.2.11.tar.gz"],
)
http_archive(
name = "bazel_skylib",
url = "https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz",
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
)
# Open source version of the google python flags library.
http_archive(
name = "absl_py",
sha256 = "980ce58c34dfa75a9d20d45c355658191c166557f1de41ab52f208bd00604c2b",
strip_prefix = "abseil-py-b347ba6022370f895d3133241ed96965b95ecb40",
urls = ["https://github.com/abseil/abseil-py/archive/b347ba6022370f895d3133241ed96965b95ecb40.tar.gz"],
)
| 40.781065 | 170 | 0.663088 |
79418c7cdc6515a1785dc6ad692082d4329750e8 | 1,858 | py | Python | lib/extra_char_class.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | null | null | null | lib/extra_char_class.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | 1 | 2020-12-28T23:01:49.000Z | 2020-12-29T15:47:39.000Z | lib/extra_char_class.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | null | null | null | """
Regular expressions for policing the inefficient
usage of character classes to hold single
(meta)characters.
This is an efficiency issue noted in Chapter 6 of:
Friedl, Jeffrey. Mastering Regular Expressions. 3rd ed.,
O’Reilly Media, 2009.
See section: 'Don’t use superfluous character classes'
In particular, there's an overhead associated with placement
of the single (meta)characters in the class.
"""
import re
class FileOperatorExtraCharClass:
def __init__(self):
self.pattern_compile = r'(?P<start>re\.compile\(\n*\s*r[\'"].*?)(?P<offender>\[\\?.\])(?P<end>\n*\s*.*?[\'"].*?\n*\s*\))'
self.pattern_match = r'(?P<start>re\.match\(\n*\s*r[\'"].*?)(?P<offender>\[\\?.\])(?P<end>.*\n*\s*,\s)'
self.pattern_list = [self.pattern_compile,
self.pattern_match]
self.prog_list = [re.compile(pattern) for pattern in self.pattern_list]
def replacer(self, match):
# even if [\w], always start at second
# position and go to second last to
# find the element to be removed from
# class
single_char = match.group('offender')[1:-1]
if single_char in '.*+?()[]|':
single_char = '\\' + single_char
return match.group('start') + single_char + match.group('end')
def per_file_operator(self, filepath):
with open(filepath, 'r') as infile:
file_string = infile.read()
new_file_string = None
for prog in self.prog_list:
if prog.search(file_string):
while prog.search(file_string):
file_string = prog.sub(self.replacer, file_string)
new_file_string = file_string
if new_file_string is not None:
with open(filepath, 'w') as outfile:
outfile.write(new_file_string)
| 37.918367 | 129 | 0.60549 |
79418cd592d702e04f7c4d94e1e7793885e02888 | 1,398 | py | Python | kisti/test/read_json_test.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | kisti/test/read_json_test.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | kisti/test/read_json_test.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | from osf import read_json
def read_kci_paper():
rj = read_json.ReadJson()
data = rj.read_json('json/kci_paper.json')
for i in data:
if i.get('author_main') == '오삼균':
print(i)
def read_author():
rj = read_json.ReadJson()
data = rj.read_json('json/author_email_mapping.json')
print(data.get('홍재현'))
def read_title():
rj = read_json.ReadJson()
data = rj.read_json('json/kci_paper.json')
for i in data:
if i.get('title01') == '국내 학술지 논문의 오픈 액세스와 아카이빙을 위한 저작권 귀속 연구: 한국학술진흥재단 등재 학술지를 중심으로':
print(i)
def read_po():
rj = read_json.ReadJson()
data = rj.read_json('json/kci_paper.json')
for i in data:
print(i.get('po'))
def read_po_condition():
rj = read_json.ReadJson()
data = rj.read_json('json/kci_paper.json')
count = 0
author_list = []
print(len(data))
for i in data:
if i.get('po') == '한국정보관리학회':
if '기계학습' in i.get('title01'):
print(i)
author_list.append(i.get('author_main'))
count+=1
print(count)
print(len(author_list))
author_list = list(set(author_list))
print(sorted(author_list))
print(len(author_list))
def read_total():
rj = read_json.ReadJson()
data = rj.read_json('json/kci_paper_library_one.json')
print(len(data))
if __name__ == '__main__':
read_total()
| 22.918033 | 94 | 0.602289 |
79418d7801d2ab381374dba909684e70d2416d6c | 1,023 | py | Python | PythonBasics/Files,Exceptions and functions/exceptionsDemo.py | AnyMoment/PracticePython | 34c1b998a3dd10f3d44650d51a335e350e3e220e | [
"MIT"
] | null | null | null | PythonBasics/Files,Exceptions and functions/exceptionsDemo.py | AnyMoment/PracticePython | 34c1b998a3dd10f3d44650d51a335e350e3e220e | [
"MIT"
] | null | null | null | PythonBasics/Files,Exceptions and functions/exceptionsDemo.py | AnyMoment/PracticePython | 34c1b998a3dd10f3d44650d51a335e350e3e220e | [
"MIT"
] | null | null | null | """
.........................assertions......................
def power(x,y):
    assert x>0,"x must be a positive number not {0}"
assert y>0, "y must be a positive non zero number"
return x**y
print(power(1,2))
print(power(1,1))
print(power(1,-1))
....................Exception as arguments
"""
try:
x=int(input("enter any integer:"))
y=int(input("enter any integer:"))
z=x/y
except ValueError:
print("No alphabets")
except Exception as arg:
print("error", arg)
"""
........................try except else..........................
try:
x=int(input("enter any integer:"))
y=int(input("enter any integer:"))
z=x/y
print("printing z",z)
except:
pass
print("Error:Arithmetic operation ignored.Pass block")
else:
print("successful")
....................raise an error..........
x=int(input("enter any integer:"))
y=int(input("enter any integer:"))
if y==0:
raise ZeroDivisionError('Sorry Unable to compute')
else:
z=x/y
print(z)
""" | 22.733333 | 69 | 0.535679 |
79418d99862c520a1d132407b24b742608582828 | 5,811 | py | Python | sdks/python/apache_beam/runners/portability/fn_api_runner_transforms.py | mxm/incubator-beam | a076444706ec8a48e0bd616d234a2a4785233e37 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/runners/portability/fn_api_runner_transforms.py | mxm/incubator-beam | a076444706ec8a48e0bd616d234a2a4785233e37 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/runners/portability/fn_api_runner_transforms.py | mxm/incubator-beam | a076444706ec8a48e0bd616d234a2a4785233e37 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2019-11-04T11:21:48.000Z | 2019-11-04T11:21:48.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline transformations for the FnApiRunner.
"""
from __future__ import absolute_import
from __future__ import print_function
from builtins import object
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
KNOWN_COMPOSITES = frozenset(
[common_urns.primitives.GROUP_BY_KEY.urn,
common_urns.composites.COMBINE_PER_KEY.urn])
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset(),
parent=None):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
self.timer_pcollections = []
self.parent = parent
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
if self.downstream_side_inputs is None:
downstream_side_inputs = '<unknown>'
else:
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow,
downstream_side_inputs)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
not self in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == common_urns.primitives.FLATTEN.urn
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(list(transform.outputs.items()))[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
class TransformContext(object):
def __init__(self, components):
self.components = components
def add_or_get_coder_id(self, coder_proto):
for coder_id, coder in self.components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(self.components.coders, 'coder')
self.components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def leaf_transform_stages(
root_ids, components, parent=None, known_composites=KNOWN_COMPOSITES):
for root_id in root_ids:
root = components.transforms[root_id]
if root.spec.urn in known_composites:
yield Stage(root_id, [root], parent=parent)
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield Stage(root_id, [root], parent=parent)
else:
for stage in leaf_transform_stages(
root.subtransforms, components, root_id, known_composites):
yield stage
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
def only_element(iterable):
element, = iterable
return element
| 34.182353 | 76 | 0.707623 |
79418f3ef5afee1ba9682defed65d0dfd6063312 | 68 | py | Python | models/tilemap/tiles/ground.py | matumaros/BomberApe | d71616192fd54d9a595261c258e4c7367d2eac5d | [
"Apache-2.0"
] | null | null | null | models/tilemap/tiles/ground.py | matumaros/BomberApe | d71616192fd54d9a595261c258e4c7367d2eac5d | [
"Apache-2.0"
] | null | null | null | models/tilemap/tiles/ground.py | matumaros/BomberApe | d71616192fd54d9a595261c258e4c7367d2eac5d | [
"Apache-2.0"
] | null | null | null |
from .tile import Tile
class Ground(Tile):
LTYPE = 'ground'
| 8.5 | 22 | 0.647059 |
79419035bee43c8ecb7aad2d576044865c496acb | 2,690 | py | Python | google/ads/google_ads/v3/proto/services/product_bidding_category_constant_service_pb2_grpc.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v3/proto/services/product_bidding_category_constant_service_pb2_grpc.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/proto/services/product_bidding_category_constant_service_pb2_grpc.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v3.proto.resources import product_bidding_category_constant_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2
from google.ads.google_ads.v3.proto.services import product_bidding_category_constant_service_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2
class ProductBiddingCategoryConstantServiceStub(object):
"""Proto file describing the Product Bidding Category constant service
Service to fetch Product Bidding Categories.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetProductBiddingCategoryConstant = channel.unary_unary(
'/google.ads.googleads.v3.services.ProductBiddingCategoryConstantService/GetProductBiddingCategoryConstant',
request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2.GetProductBiddingCategoryConstantRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2.ProductBiddingCategoryConstant.FromString,
)
class ProductBiddingCategoryConstantServiceServicer(object):
"""Proto file describing the Product Bidding Category constant service
Service to fetch Product Bidding Categories.
"""
def GetProductBiddingCategoryConstant(self, request, context):
"""Returns the requested Product Bidding Category in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ProductBiddingCategoryConstantServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetProductBiddingCategoryConstant': grpc.unary_unary_rpc_method_handler(
servicer.GetProductBiddingCategoryConstant,
request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_product__bidding__category__constant__service__pb2.GetProductBiddingCategoryConstantRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_product__bidding__category__constant__pb2.ProductBiddingCategoryConstant.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v3.services.ProductBiddingCategoryConstantService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 51.730769 | 211 | 0.84461 |
794190a55d8223e75dd9c511f4597edf16942323 | 296 | py | Python | sbirez/migrations/0031_merge.py | kaitlin/afsbirez | 72fd39709adf8ba470151007739cc06e4d3d79dd | [
"CC0-1.0"
] | 8 | 2015-03-30T18:17:28.000Z | 2015-11-19T08:25:56.000Z | sbirez/migrations/0031_merge.py | kaitlin/afsbirez | 72fd39709adf8ba470151007739cc06e4d3d79dd | [
"CC0-1.0"
] | 76 | 2015-01-16T18:23:16.000Z | 2017-07-07T19:36:41.000Z | sbirez/migrations/0031_merge.py | kaitlin/afsbirez | 72fd39709adf8ba470151007739cc06e4d3d79dd | [
"CC0-1.0"
] | 16 | 2015-02-12T20:14:05.000Z | 2021-02-14T11:26:51.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sbirez', '0030_auto_20150817_1927'),
('sbirez', '0030_element_report_text'),
]
operations = [
]
| 18.5 | 47 | 0.655405 |
79419145e2b7656f5a34b03878a2cc412c4c247b | 436 | py | Python | trip_planner/pages/forms.py | ankurahuja-tech/trip-planner | ed8123e69076efec624908723bae981e3d3fee22 | [
"MIT"
] | 1 | 2021-09-03T09:26:13.000Z | 2021-09-03T09:26:13.000Z | trip_planner/pages/forms.py | ankurahuja-tech/trip-planner | ed8123e69076efec624908723bae981e3d3fee22 | [
"MIT"
] | null | null | null | trip_planner/pages/forms.py | ankurahuja-tech/trip-planner | ed8123e69076efec624908723bae981e3d3fee22 | [
"MIT"
] | null | null | null | from django import forms
from django.forms.widgets import Textarea
class ContactForm(forms.Form):
name = forms.CharField(required=True, max_length=50)
email = forms.EmailField(
required=False, help_text="(optional) Kindly provide your email to allow me to respond to you."
)
subject = forms.CharField(required=True, max_length=75)
message = forms.CharField(widget=Textarea, required=True, max_length=1200)
| 36.333333 | 103 | 0.745413 |
7941918cf971abcbaa0d40d15d4d4be58b179ebd | 791 | py | Python | legume/__init__.py | shanham/legume | 859243a5523c6ea89ea2ed7520209c44d6f2dc1c | [
"MIT"
] | null | null | null | legume/__init__.py | shanham/legume | 859243a5523c6ea89ea2ed7520209c44d6f2dc1c | [
"MIT"
] | null | null | null | legume/__init__.py | shanham/legume | 859243a5523c6ea89ea2ed7520209c44d6f2dc1c | [
"MIT"
] | null | null | null | '''
Photonic crystal PWE/GME simulator with autograd support
Monkey-patching numpy/autograd backend inspired by Floris Laporte's FDTD
package at github.com/flaport/fdtd
'''
from .phc import Shape, Circle, Poly, Square, Hexagon, FourierShape
from .phc import PhotCryst, Layer, ShapesLayer, FreeformLayer, Lattice
from . import gds
from . import viz
from .pwe import PlaneWaveExp
from .gme import GuidedModeExp
from .gme.slab_modes import guided_modes, rad_modes
from .backend import backend, set_backend
__all__ = ['GuidedModeExp',
'PlaneWaveExp',
'PhotCryst',
'ShapesLayer',
'FreeformLayer',
'Lattice',
'Shape',
'Circle',
'Poly',
'Square',
'Hexagon']
__version__ = '0.1.5'
| 26.366667 | 72 | 0.65866 |
794191ecfc2ceb867552ea618b60bc4c304a2f7c | 937 | py | Python | caesar cipher/refactoring.py | yeshan333/Cryptography-learning-note | ea32fe95b0e66ce86c5bcebb74cde3ca7a626aee | [
"Apache-2.0"
] | 1 | 2019-12-04T05:03:36.000Z | 2019-12-04T05:03:36.000Z | caesar cipher/refactoring.py | yeshan333/Cryptography-learning-note | ea32fe95b0e66ce86c5bcebb74cde3ca7a626aee | [
"Apache-2.0"
] | null | null | null | caesar cipher/refactoring.py | yeshan333/Cryptography-learning-note | ea32fe95b0e66ce86c5bcebb74cde3ca7a626aee | [
"Apache-2.0"
] | null | null | null | # 优化凯撒加密解密程序结构
LETTERS = ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'
def caesarCipher(message, mode, key):
translated = ''
global LETTERS
for symbol in message:
if symbol in LETTERS:
num = LETTERS.find(symbol)
if mode == 'encrypt':
num = num + key
elif mode == 'decrypt':
num = num - key
if num >= len(LETTERS):
num = num - len(LETTERS)
elif num < 0:
num = num + len(LETTERS)
translated = translated + LETTERS[num]
else:
translated = translated + symbol
return translated
if __name__ == '__main__':
message = 'This is my secret message.'
key = 13
mode = 'encrypt'
new_message = caesarCipher(message, mode, key)
print(f'{new_message}') | 26.027778 | 109 | 0.514408 |
7941926954310da6a513c5073f4ee28198ffb11f | 260 | py | Python | ampoule/_version.py | glyph/ampoule | c2f4331f105acfac5869910c487e7e9e92072088 | [
"MIT"
] | 1 | 2017-12-24T22:49:17.000Z | 2017-12-24T22:49:17.000Z | ampoule/_version.py | glyph/ampoule | c2f4331f105acfac5869910c487e7e9e92072088 | [
"MIT"
] | 18 | 2017-12-14T04:04:17.000Z | 2017-12-18T05:56:31.000Z | ampoule/_version.py | glyph/ampoule | c2f4331f105acfac5869910c487e7e9e92072088 | [
"MIT"
] | null | null | null | """
Provides ampoule version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update ampoule` to change this file.
from incremental import Version
__version__ = Version('ampoule', 22, 5, 0)
__all__ = ["__version__"]
| 21.666667 | 65 | 0.730769 |
794192b96e81e383b757044365169028bbaac159 | 3,472 | py | Python | engine/tests/test_collision.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
] | null | null | null | engine/tests/test_collision.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
] | null | null | null | engine/tests/test_collision.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
] | null | null | null | from itertools import chain
import pytest
from engine.models.factories import ShipModelFactory, AsteroidModelFactory
from engine.physics.line import Line
factory = ShipModelFactory()
amf = AsteroidModelFactory()
class TestLine(object):
def setup(self):
self.target = Line([(0, 0), (10, 10)])
def test_set_position_rotation(self):
self.target.set_position_rotation(10, 0, 0)
assert self.target.x1 == 10
assert self.target.x2 == 20
assert self.target.y1 == 0
assert self.target.y2 == 10
def test_freeze(self):
self.target.set_position_rotation(10, 0, 0)
self.target.freeze()
assert self.target.x1 == 10
assert self.target.x2 == 20
assert self.target.y1 == 0
assert self.target.y2 == 10
def test_moving_after_freeze(self):
self.target.set_position_rotation(10, 0, 0)
self.target.freeze()
self.target.set_position_rotation(10, 0, 0)
assert self.target.x1 == 20
assert self.target.x2 == 30
assert self.target.y1 == 0
assert self.target.y2 == 10
class TestBoundingBox(object):
def setup(self):
self.ship1 = factory.manufacture("ship", position=[10, 0, 0])
self.ship2 = factory.manufacture("ship", position=[-10, 0, 0])
def test_ships_do_not_overlap(self):
collides, x, y = self.ship1.intersection_point(self.ship2)
assert not collides
def test_moving_ships_moves_bounding_box(self):
bb_xes = list(chain(*[[line.x1, line.x2] for line in self.ship1.bounding_box.lines]))
bb_yes = list(chain(*[[line.y1, line.y2] for line in self.ship1.bounding_box.lines]))
self.ship1.set_position_and_rotation(0, 0, 0, 0, 0, 0)
self.ship1.update_bounding_box()
moved_coords = list(chain(*[[line.x1, line.x2] for line in self.ship1.bounding_box.lines]))
assert bb_xes != list(chain(*[[line.x1, line.x2] for line in self.ship1.bounding_box.lines]))
assert [round(x - 10, 1) for x in bb_xes] == [round(x, 1) for x in moved_coords]
assert bb_yes == list(chain(*[[line.y1, line.y2] for line in self.ship1.bounding_box.lines]))
class TestAsteroidShipCollision(object):
def setup(self):
self.asteroid = amf.manufacture(position=[-50, 0, 0])
self.ship = factory.manufacture("ship", position=[10, 0, 0])
def test_asteroid_has_no_movement(self):
assert all(p[0] < 0 for p in self.asteroid.bounding_box._moving_points)
assert self.asteroid.bounding_box.moving_right < 0
for bb in self.asteroid.bounding_box:
assert all(p[0] < 0 for p in bb._moving_points)
assert bb.moving_right < 0
def test_collision(self):
self.ship.set_position(-50, 0, 0)
self.ship.update_bounding_box()
my_parts, asteroid_parts = self.ship.polygons_in_order_of_collision(self.asteroid)
assert 0 < len(my_parts)
assert 1 == len(asteroid_parts)
test_data = [
(
amf.manufacture(position=(-50, 0, 0), rotation=(0, d, 0)),
factory.manufacture("ship", position=(50, 0, 0))
) for d in range(0, 360, 10)]
@pytest.mark.parametrize("asteroid,ship", test_data)
def test_collision_360(asteroid, ship):
ship.set_position(-50, 0, 0)
ship.update_bounding_box()
my_parts, asteroid_parts = ship.polygons_in_order_of_collision(asteroid)
assert 0 < len(my_parts)
assert 1 == len(asteroid_parts)
| 35.428571 | 101 | 0.658986 |
7941940ab5e56c01b74206b466d1710132236112 | 1,642 | py | Python | accounts/migrations/0006_auto_20210813_1713.py | AungKMin/network-manager | 0db8234a8534731aa3cd0b9001c91ec390704103 | [
"MIT"
] | null | null | null | accounts/migrations/0006_auto_20210813_1713.py | AungKMin/network-manager | 0db8234a8534731aa3cd0b9001c91ec390704103 | [
"MIT"
] | null | null | null | accounts/migrations/0006_auto_20210813_1713.py | AungKMin/network-manager | 0db8234a8534731aa3cd0b9001c91ec390704103 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-13 21:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20210813_1623'),
]
operations = [
migrations.RenameModel(
old_name='Tag',
new_name='ContactTag',
),
migrations.AlterField(
model_name='contact',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name='contact',
name='organization',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='contactpoint',
name='link',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name='contactpoint',
name='notes',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name='contactpointmethod',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name='contactpointmethod',
name='hours_for_response',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='contactpointmethod',
name='link',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 30.981132 | 75 | 0.573691 |
794194f2c79b24c5d5b4b7656ffcc7d8e13dbdbb | 6,823 | py | Python | djf_surveys/summary.py | irfanpule/django-form-surveys | ae27133f040e4bdf40c02944c6d5af65562b216d | [
"MIT"
] | 5 | 2022-03-22T08:10:07.000Z | 2022-03-30T11:52:01.000Z | djf_surveys/summary.py | irfanpule/django-form-surveys | ae27133f040e4bdf40c02944c6d5af65562b216d | [
"MIT"
] | 1 | 2022-03-14T08:49:49.000Z | 2022-03-14T09:40:10.000Z | djf_surveys/summary.py | irfanpule/django-form-surveys | ae27133f040e4bdf40c02944c6d5af65562b216d | [
"MIT"
] | null | null | null | import random
from djf_surveys.models import TYPE_FIELD, Survey, Question, Answer
from djf_surveys.utils import create_star
COLORS = [
'#64748b', '#a1a1aa', '#374151', '#78716c', '#d6d3d1', '#fca5a5', '#ef4444', '#7f1d1d',
'#fb923c', '#c2410c', '#fcd34d', '#b45309', '#fde047', '#bef264', '#ca8a04', '#65a30d',
'#86efac', '#15803d', '#059669', '#a7f3d0', '#14b8a6', '#06b6d4', '#155e75', '#0ea5e9',
'#075985', '#3b82f6', '#1e3a8a', '#818cf8', '#a78bfa', '#a855f7', '#6b21a8', '#c026d3',
'#db2777', '#fda4af', '#e11d48', '#9f1239'
]
class ChartJS:
"""
this class to generate chart https://www.chartjs.org
"""
chart_id = ""
chart_name = ""
element_html = ""
element_js = ""
width = 400
height = 400
data = []
labels = []
colors = COLORS
def __init__(self, chart_id: str, chart_name: str, *args, **kwargs):
self.chart_id = f"djfChart{chart_id}"
self.chart_name = chart_name
def _base_element_html(self):
self.element_html = f"""
<div class="swiper-slide">
<blockquote class="p-6 border border-gray-100 rounded-lg shadow-lg bg-white">
<canvas id="{self.chart_id}" width="{self.width}" height="{self.height}"></canvas>
</blocquote>
</div>
"""
def _shake_colors(self):
self.colors = random.choices(COLORS, k=len(self.labels))
def _config(self):
pass
def _setup(self):
pass
def render(self):
self._base_element_html()
self._shake_colors()
script = f"""
{self.element_html}
<script>
{self._setup()}
{self._config()}
const myChart{self.chart_id} = new Chart(
document.getElementById('{self.chart_id}'),
config{self.chart_id}
);
</script>
"""
return script
class ChartPie(ChartJS):
""" this class to generate pie chart"""
def _config(self):
script = """
const config%s = {
type: 'pie',
data: data%s,
options: {
responsive: true,
plugins: {
legend: {
position: 'top',
},
title: {
display: true,
text: '%s'
}
}
},
};
"""
return script % (self.chart_id, self.chart_id, self.chart_name)
def _setup(self):
script = """
const data%s = {
labels: %s,
datasets: [
{
label: 'Dataset 1',
data: %s,
backgroundColor: %s
}
]
};
"""
return script % (self.chart_id, self.labels, self.data, self.colors)
class ChartBar(ChartJS):
""" this class to generate bar chart"""
def _config(self):
script = """
const config%s = {
type: 'bar',
data: data%s,
options: {
scales: {
y: {
beginAtZero: true
}
}
},
};
"""
return script % (self.chart_id, self.chart_id)
def _setup(self):
script = """
const data%s = {
labels: %s,
datasets: [{
label: '%s',
data: %s,
backgroundColor: %s,
borderWidth: 1
}]
};
"""
return script % (self.chart_id, self.labels, self.chart_name, self.data, self.colors)
class ChartBarRating(ChartBar):
height = 200
rate_avg = 0
def _base_element_html(self):
stars = create_star(active_star=int(self.rate_avg))
self.element_html = f"""
<div class="swiper-slide">
<blockquote class="p-6 border border-gray-100 rounded-lg shadow-lg bg-white">
<div class="bg-yellow-100 space-y-1 py-5 rounded-md border border-yellow-200 text-center shadow-xs mb-2">
<h1 class="text-5xl font-semibold"> {self.rate_avg}</h1>
<div class="flex justify-center">
{stars}
</div>
<h5 class="mb-0 mt-1 text-sm"> Rate Average</h5>
</div>
<canvas id="{self.chart_id}" width="{self.width}" height="{self.height}"></canvas>
</blocquote>
</div>
"""
class SummaryResponse:
def __init__(self, survey: Survey):
self.survey = survey
def _process_radio_type(self, question: Question) -> str:
pie_chart = ChartPie(chart_id=f"chartpie_{question.id}", chart_name=question.label)
labels = question.choices.split(",")
data = []
for label in labels:
clean_label = label.strip().replace(' ', '_').lower()
count = Answer.objects.filter(question=question, value=clean_label).count()
data.append(count)
pie_chart.labels = labels
pie_chart.data = data
return pie_chart.render()
def _process_rating_type(self, question: Question):
bar_chart = ChartBarRating(chart_id=f"chartbar_{question.id}", chart_name=question.label)
labels = ['1', '2', '3', '4', '5']
data = []
for label in labels:
count = Answer.objects.filter(question=question, value=label).count()
data.append(count)
values_rating = Answer.objects.filter(question=question).values_list('value', flat=True)
values_convert = [int(v) for v in values_rating]
try:
rating_avg = round(sum(values_convert) / len(values_convert), 1)
except ZeroDevisionError:
rating_avg = 0
bar_chart.labels = labels
bar_chart.data = data
bar_chart.rate_avg = rating_avg
return bar_chart.render()
def _process_multiselect_type(self, question: Question) -> str:
bar_chart = ChartBar(chart_id=f"barchart_{question.id}", chart_name=question.label)
labels = question.choices.split(",")
str_value = []
for answer in Answer.objects.filter(question=question):
str_value.append(answer.value)
all_value = ",".join(str_value)
data_value = all_value.split(",")
data = []
for label in labels:
clean_label = label.strip().replace(' ', '_').lower()
data.append(data_value.count(clean_label))
bar_chart.labels = labels
bar_chart.data = data
return bar_chart.render()
def generate(self):
html_str = []
for question in self.survey.questions.all():
if question.type_field == TYPE_FIELD.radio or question.type_field == TYPE_FIELD.select:
html_str.append(self._process_radio_type(question))
elif question.type_field == TYPE_FIELD.multi_select:
html_str.append(self._process_multiselect_type(question))
elif question.type_field == TYPE_FIELD.rating:
html_str.append(self._process_rating_type(question))
if not html_str:
return """
<div class="bg-yellow-100 space-y-1 py-5 rounded-md border border-yellow-200 text-center shadow-xs mb-2">
<h1 class="text-2xl font-semibold">Have't summary</h1>
<h5 class="mb-0 mt-1 text-sm p-2">Summary just calculate type field "radio, select, multi_select, rating"</h5>
</div>
"""
return " ".join(html_str)
| 28.429167 | 114 | 0.597098 |
794195ad453e2a64801477d103e7c94551cd6269 | 853 | py | Python | tutorials/load_saved_model/tutorial_two.py | wangsd01/blackbox_mpc | 7876dee1bd85bde310e88741f5c63e3f7bd93916 | [
"MIT"
] | 29 | 2020-10-20T08:14:45.000Z | 2022-02-01T13:43:13.000Z | tutorials/load_saved_model/tutorial_two.py | wangsd01/blackbox_mpc | 7876dee1bd85bde310e88741f5c63e3f7bd93916 | [
"MIT"
] | 3 | 2020-11-27T13:25:08.000Z | 2021-12-12T04:30:41.000Z | tutorials/load_saved_model/tutorial_two.py | wangsd01/blackbox_mpc | 7876dee1bd85bde310e88741f5c63e3f7bd93916 | [
"MIT"
] | 3 | 2021-04-15T14:23:41.000Z | 2022-03-28T05:43:29.000Z | """
- instantiate an env for a pendulum
- instantiate an MPC by loading a previosuluy saved dynamics model.
- render the result.
"""
from blackbox_mpc.policies.mpc_policy import \
MPCPolicy
from blackbox_mpc.utils.pendulum import pendulum_reward_function
import gym
env = gym.make("Pendulum-v0")
mpc_policy = MPCPolicy(reward_function=pendulum_reward_function,
env_action_space=env.action_space,
env_observation_space=env.observation_space,
optimizer_name='CEM',
saved_model_dir='./saved_model',
num_agents=1)
current_obs = env.reset()
for t in range(200):
action_to_execute, expected_obs, expected_reward = mpc_policy.act(
current_obs, t)
current_obs, reward, _, info = env.step(action_to_execute)
env.render()
| 34.12 | 70 | 0.675264 |
79419790576fe9a11a74643d3848fdff5969b798 | 895 | py | Python | paml/lib/wait.py | palibhasataolamang/paml | f3f2c113f0925e42557820416199bd6eaaeb091c | [
"MIT"
] | null | null | null | paml/lib/wait.py | palibhasataolamang/paml | f3f2c113f0925e42557820416199bd6eaaeb091c | [
"MIT"
] | null | null | null | paml/lib/wait.py | palibhasataolamang/paml | f3f2c113f0925e42557820416199bd6eaaeb091c | [
"MIT"
] | null | null | null | import sbol3
import paml
#############################################
# Set up the document
doc = sbol3.Document()
LIBRARY_NAME = 'wait'
sbol3.set_namespace('https://bioprotocols.org/paml/primitives/'+LIBRARY_NAME)
#############################################
# Create the primitives
print('Making primitives for '+LIBRARY_NAME)
p = paml.Primitive('WaitForTime')
p.description = 'Waits for a set amount of time.'
p.add_input('amount', sbol3.OM_MEASURE)
doc.add(p)
p = paml.Primitive('WaitForTrue')
p.description = 'Waits for an expression to be true.'
p.add_input('expression', 'http://www.w3.org/2001/XMLSchema#boolean')
doc.add(p)
print('Library construction complete')
print('Validating library')
for e in doc.validate().errors: print(e);
for w in doc.validate().warnings: print(w);
filename = LIBRARY_NAME+'.ttl'
doc.write(filename,'turtle')
print('Library written as '+filename)
| 25.571429 | 77 | 0.669274 |
79419891b3571a08b249daf88c81de1b891026bc | 4,003 | py | Python | alipay/aop/api/request/AlipayMarketingRecruitEnrollCreateRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMarketingRecruitEnrollCreateRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMarketingRecruitEnrollCreateRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingRecruitEnrollCreateModel import AlipayMarketingRecruitEnrollCreateModel
class AlipayMarketingRecruitEnrollCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingRecruitEnrollCreateModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingRecruitEnrollCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.recruit.enroll.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.606897 | 148 | 0.647514 |
794198d2b5e9614307f9d29fabd371dbfd531ef7 | 6,868 | py | Python | doc/source/conf.py | josephwinston/TileDB | 40ea8ba8354e1a8e5af904dfa641468643bee949 | [
"MIT"
] | 1 | 2020-10-21T08:14:42.000Z | 2020-10-21T08:14:42.000Z | doc/source/conf.py | georgeSkoumas/TileDB | 3b9f88002ef245afb93b041ba3c60188893f34a3 | [
"MIT"
] | null | null | null | doc/source/conf.py | georgeSkoumas/TileDB | 3b9f88002ef245afb93b041ba3c60188893f34a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TileDB documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 5 10:08:37 2018.
#
# -- Imports configuration -------------------------------------------------
import os
import subprocess
import sys
from os.path import abspath, join, dirname
sys.path.insert(0, abspath(join(dirname(__file__))))
# -- ReadTheDocs configuration ---------------------------------------------
# Special handling on ReadTheDocs builds.
# Some of this code is from https://github.com/robotpy/robotpy-docs/blob/master/conf.py
readthedocs = os.environ.get('READTHEDOCS', None) == 'True'
rtd_version = os.environ.get('READTHEDOCS_VERSION', 'latest')
rtd_version = rtd_version if rtd_version in ['stable', 'latest'] else 'stable'
# On RTD, build the Doxygen XML files.
if readthedocs:
# Build docs
subprocess.check_call('''
mkdir ../../build;
cd ../../build;
../bootstrap;
make doc;
''', shell=True)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.contentui',
'breathe'
]
if readthedocs:
# Mapping for linking between RTD subprojects.
intersphinx_mapping = {
'tiledb': ('https://tiledb-inc-tiledb.readthedocs-hosted.com/en/%s/' % rtd_version, None),
'tiledb-py': ('https://tiledb-inc-tiledb.readthedocs-hosted.com/projects/python-api/en/%s/' % rtd_version, None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TileDB'
copyright = '2020 TileDB, Inc'
author = 'TileDB, Inc.'
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# Breathe extension configuration.
tiledb_dir = '../../'
doxygen_xml_dir = tiledb_dir + 'build/xml/'
breathe_projects = {'TileDB-C': doxygen_xml_dir, 'TileDB-C++': doxygen_xml_dir}
breathe_default_project = 'TileDB-C'
breathe_projects_source = {
'TileDB-C': (tiledb_dir + 'tiledb/sm/c_api/', ['tiledb.h']),
'TileDB-C++': (tiledb_dir + 'tiledb/sm/cpp_api/', ['tiledb'])
}
breathe_domain_by_file_pattern = {
'*/c_api/tiledb.h': 'c',
'*/cpp_api/tiledb': 'cpp'
}
# Allow parsing TILEDB_DEPRECATED in C++ function signatures.
cpp_id_attributes = ['TILEDB_DEPRECATED']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_static_path = ['_static']
html_logo = '_static/[email protected]'
html_favicon = '_static/favicon.ico'
if readthedocs:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TileDBdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TileDB.tex', 'TileDB Documentation',
'TileDB, Inc.', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tiledb', 'TileDB Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TileDB', 'TileDB Documentation',
author, 'TileDB', 'One line description of project.',
'Miscellaneous'),
]
# -- Custom Document processing ----------------------------------------------
# Generate the sidebar automatically so that it is identical across all subprojects.
# This (and gensidebar.py) from https://github.com/robotpy/robotpy-docs
import gensidebar
gensidebar.generate_sidebar({'on_rtd': readthedocs, 'rtd_version': rtd_version}, 'tiledb')
# Replace C/C++ source examples path
def replaceText(app, docname, source):
result = source[0]
for key in app.config.text_replacements:
result = result.replace(key, app.config.text_replacements[key])
source[0] = result
text_replacements = {
"{source_examples_path}" : "../../examples",
"{tiledb_src_root_url}" : "https://github.com/TileDB-Inc/TileDB/blob/dev",
"{tiledb_py_src_root_url}" : "https://github.com/TileDB-Inc/TileDB-Py/blob/dev",
"{tiledb_R_src_root_url}" : "https://github.com/TileDB-Inc/TileDB-R/blob/master",
"{tiledb_go_src_root_url}" : "https://github.com/TileDB-Inc/TileDB-Go/blob/master"
}
# -- Custom setup -----------------------------------------------------------
def add_custom_js(app):
app.add_javascript('custom.js')
def setup(app):
app.add_config_value('text_replacements', {}, True)
app.connect('source-read', replaceText)
app.add_stylesheet('custom.css')
# Use this event so that our custom JS gets included *after* the ContentUI
# extension adds its JS, otherwise we can't override its behavior.
app.connect('builder-inited', add_custom_js)
| 31.504587 | 120 | 0.656814 |
79419c6c06be5a9c87a9d0d4b892c5a7dc6bb7f9 | 3,837 | py | Python | bsp/stm32/stm32f429-armfly-v6/rtconfig.py | Tigerots/rt-thread | c95987e75772ebc6a6fdd0c19d12001aef810cb2 | [
"Apache-2.0"
] | 3 | 2021-01-27T09:03:43.000Z | 2021-04-25T18:20:32.000Z | bsp/stm32/stm32f429-armfly-v6/rtconfig.py | zlzerg/rt-thread | c0a400ccbee720fc0e9ee904298f09bd07a21382 | [
"Apache-2.0"
] | 2 | 2019-09-23T01:59:04.000Z | 2019-12-06T01:24:37.000Z | bsp/stm32/stm32f429-armfly-v6/rtconfig.py | zlzerg/rt-thread | c0a400ccbee720fc0e9ee904298f09bd07a21382 | [
"Apache-2.0"
] | 7 | 2019-07-01T02:50:47.000Z | 2020-12-11T10:01:07.000Z | import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
| 26.462069 | 152 | 0.572583 |
79419db959e4c2008138b1e4cf10be3492ddf832 | 1,713 | py | Python | tests/write_for_externals.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 2 | 2019-04-02T13:46:55.000Z | 2022-03-18T02:33:51.000Z | tests/write_for_externals.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 119 | 2018-09-10T13:45:41.000Z | 2022-03-11T23:55:07.000Z | tests/write_for_externals.py | hlibbabii/log-recommender | 03c975da4029676acb2c29f5915e30b2b29fce6c | [
"MIT"
] | 1 | 2019-04-02T14:29:24.000Z | 2019-04-02T14:29:24.000Z | from logrec.dataprep.preprocessors import apply_preprocessors
from logrec.dataprep.preprocessors.preprocessor_list import pp_params
from logrec.dataprep.preprocessors.general import from_file
__author__ = 'hlib'
import unittest
class PreprocessTest(unittest.TestCase):
@unittest.skip("outdated") # TODO update!!!
def test_process_full_identifiers(self):
inp = [
'public class WR3223Activator implements BundleActivator { // 6_89',
' private static Logger logger = LoggerFactory.getLogger(WR3223Activator.class);',
' @Override 0 1 2',
' public void start(BundleContext context) throws Exception {',
' logger.debug("WR3223 Binding has been started.");',
' }',
'',
' @Override',
' public void stop(BundleContext context) throws Exception {',
' /* context = null;',
' */ logger.debug("WR3223 Binding has been stopped 5677788888 .");'
]
expected = '''public class wr3223activator implements bundle <identifiersep> activator { <comment> \n \t1 private static logger logger = logger <identifiersep> factory . get <identifiersep> logger ( wr3223activator . class ) ; \n \t1 @ override 0 1 <number_literal> \n \t1 public void start ( bundle <identifiersep> context context ) throws exception { \n \t2 logger . debug ( <string_literal> ) ; \n \t1 } \n \n \t1 @ override \n \t1 public void stop ( bundle <identifiersep> context context ) throws exception { \n \t1 <comment> \t1 logger . debug ( <string_literal> ) ; \n <ect>'''
actual = apply_preprocessors(from_file(inp), pp_params["preprocessors"], {'interesting_context_words': []})
self.assertEqual(repr(expected)[1:-1] + "\n", actual)
if __name__ == '__main__':
unittest.main()
| 47.583333 | 592 | 0.705196 |
79419ec116e9b4ea4d4a076b505fc930917979be | 1,524 | py | Python | fbpmp/pid/service/credential_service/session_profile_cloud_credential_service.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | fbpmp/pid/service/credential_service/session_profile_cloud_credential_service.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | fbpmp/pid/service/credential_service/session_profile_cloud_credential_service.py | benliugithub/fbpcs | 7af984264428058645847135026d474d7e28144e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import os
import pathlib
import subprocess
import sys
from typing import Dict
from fbpmp.pid.service.credential_service.cloud_credential_service import CloudCredentialService
class SessionProfileCloudCredentialService(CloudCredentialService):
def __init__(self, arn: str, session_name: str, profile: str):
self.arn = arn
self.session_name = session_name
self.profile = profile
def get_creds(self) -> Dict[str, str]:
cmd = ['aws', 'sts', 'assume-role', '--role-arn', self.arn, '--role-session-name', self.session_name, '--profile', self.profile]
operating_dir = pathlib.Path(os.getcwd())
proc = subprocess.Popen(
cmd, cwd=operating_dir, stdout=subprocess.PIPE, stderr=sys.stderr
)
out, err = proc.communicate()
if proc.returncode != 0:
raise Exception(f"Getting credentials with command {cmd} failed with return code {proc.returncode}")
credentials = ast.literal_eval(out.decode("utf-8"))["Credentials"]
creds = {
"AWS_ACCESS_KEY_ID": credentials["AccessKeyId"],
"AWS_SECRET_ACCESS_KEY": credentials["SecretAccessKey"],
}
if "SessionToken" in credentials:
creds["AWS_SESSION_TOKEN"] = credentials["SessionToken"]
return creds
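# Illustrative usage sketch (added, not part of the original module); the ARN,
# session name and profile are placeholders, and get_creds() shells out to the
# AWS CLI, so the example is left commented.
# service = SessionProfileCloudCredentialService(
#     arn="arn:aws:iam::123456789012:role/example-role",
#     session_name="example-session",
#     profile="default",
# )
# creds = service.get_creds()  # {"AWS_ACCESS_KEY_ID": ..., "AWS_SECRET_ACCESS_KEY": ...}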
| 35.44186 | 136 | 0.679134 |
79419ecdbcfd8d0fd1eaa8237e1290aa3a1e6610 | 5,279 | py | Python | hphp/hack/test/hh_codesynthesis/cross_verify.py | leikahing/hhvm | 26617c88ca35c5e078c3aef12c061d7996925375 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | hphp/hack/test/hh_codesynthesis/cross_verify.py | leikahing/hhvm | 26617c88ca35c5e078c3aef12c061d7996925375 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | hphp/hack/test/hh_codesynthesis/cross_verify.py | leikahing/hhvm | 26617c88ca35c5e078c3aef12c061d7996925375 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | #!/usr/bin/env/python3
# pyre-strict
import argparse
import glob
import os
import subprocess
import tempfile
from typing import Dict, List, Set
class DependencyEdges(object):
"""A simple internal representation to categorize DependencyEdges"""
edge_types = ["Extends", "Type", "Method", "SMethod", "Fun"]
def __init__(self, lines: List[str]) -> None:
super(DependencyEdges, self).__init__()
        self.objs: Dict[str, List[str]] = {edge_type: [] for edge_type in self.edge_types}  # one list per edge type (dict.fromkeys would share a single list)
self.category_edges(lines)
def category_edges(self, lines: List[str]) -> None:
for line in lines:
            # Expected input format: "Extends A -> Type B, Type C, Type D".
            # Splitting on "->" gives:
            #   lhs = "Extends A"
            #   rhs = "Type B, Type C, Type D"
            # Skip the empty line at the end of the file.
if not line:
continue
result = line.strip().split("->")
(lhs, rhs) = result
lhs = lhs.split()
# The lhs length must be 2.
if len(lhs) != 2:
raise RuntimeError("Unexpected lhs.")
# T94428437 Temporary skipping all built-in functions for now.
if lhs[1].startswith("HH\\"):
continue
if lhs[0] in self.edge_types:
self.objs[lhs[0]].append(line)
def __le__(self, obj: "DependencyEdges") -> bool:
for edges in self.edge_types:
compare_result(set(self.objs[edges]), set(obj.objs[edges]))
return True
def compare_result(lhs: Set[str], rhs: Set[str]) -> None:
    if not lhs.issubset(rhs):
        raise RuntimeError("Unmatched lhs and rhs, expected lhs to be a subset of rhs.")
def invoke_sub_process(cmd: List[str], std_in: str) -> str:
try:
output = subprocess.check_output(
cmd,
stderr=None,
cwd=".",
universal_newlines=True,
input=std_in,
timeout=60.0,
errors="replace",
)
except subprocess.TimeoutExpired as e:
output = "Timed out. " + str(e.output)
except subprocess.CalledProcessError as e:
# we don't care about nonzero exit codes... for instance, type
# errors cause hh_single_type_check to produce them
output = str(e.output)
return output
# Cross-verify that the hh_codesynthesis binary produces Hack code whose
# dependency graph matches that of the sample Hack code.
def cross_verify(args: argparse.Namespace, file_name: str) -> bool:
# 0. Skip unsupported cases for now. (ToDo: T92593014)
if os.path.exists(file_name + ".skip_synthesis_tests"):
return True
# 1. Invoke hh_simple_type_checker to produce a dependency graph on sample.
with tempfile.NamedTemporaryFile(mode="w") as fp:
tmp_file_name = fp.name
cmd = [
args.typechecker,
file_name,
"--dump-deps",
"--no-builtins",
]
dep_graph = invoke_sub_process(cmd, "")
# 2. Invoke hh_codesynthesis binary to produce a Hack code given dep_graph.
cmd = [
args.synthesis,
"--target_lang=hack",
f"--output_file={tmp_file_name}",
]
invoke_sub_process(cmd, dep_graph)
# 3. Invoke hh_simple_type_checker on
cmd = [
args.typechecker,
f"{tmp_file_name}",
"--dump-deps",
"--no-builtins",
]
dep_output = invoke_sub_process(cmd, "")
# 4. Compare the dep_graph with dep_output.
dep_graph = dep_graph.replace(",\n", ",").split("\n")
original_extends_edges = DependencyEdges(dep_graph)
dep_output = dep_output.replace(",\n", ",").split("\n")
generate_extends_edges = DependencyEdges(dep_output)
return original_extends_edges <= generate_extends_edges
# Cross-verify that the hh_codesynthesis binary produces Hack code whose
# dependency graph is consistent with the sample synthesis parameters.
def cross_verify_with_parameters(args: argparse.Namespace) -> bool:
with tempfile.NamedTemporaryFile(mode="w") as fp:
tmp_file_name = fp.name
# 0. Invoke hh_codesynthesis binary to produce a Hack code from parameters.
cmd = [
args.synthesis,
"--target_lang=hack",
"--n=12",
"--avg_width=3",
"--min_classes=3",
"--min_interfaces=4",
"--lower_bound=1",
"--higher_bound=5",
"--min_depth=0",
f"--output_file={tmp_file_name}",
]
invoke_sub_process(cmd, "")
# 1. Reuse cross_verify using tmp_file_name as input.
return cross_verify(args, tmp_file_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test_path", help="A file or a directory. ")
parser.add_argument("--typechecker", type=os.path.abspath)
parser.add_argument("--synthesis", type=os.path.abspath)
args: argparse.Namespace = parser.parse_args()
    # Cross-verify code synthesized from a given dependency graph.
for file_name in glob.glob(args.test_path + "/*.php"):
cross_verify(args, file_name)
    # Cross-verify code synthesized from parameters.
cross_verify_with_parameters(args)
| 33.411392 | 85 | 0.606744 |
79419ee58c41cb26176b5cac8c566f179301ee34 | 4,945 | py | Python | lib/flask_api/tests/test_renderers.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | null | null | null | lib/flask_api/tests/test_renderers.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | null | null | null | lib/flask_api/tests/test_renderers.py | imtiaz-emu/gcp-flask-test | 096f466242aa14941712ab8ea06ac4fb4eaeb993 | [
"Apache-2.0"
] | null | null | null | # coding: utf8
from __future__ import unicode_literals
from flask_api import renderers, status, FlaskAPI
from flask_api.decorators import set_renderers
from flask_api.mediatypes import MediaType
import unittest
class RendererTests(unittest.TestCase):
def test_render_json(self):
renderer = renderers.JSONRenderer()
content = renderer.render({'example': 'example'}, MediaType('application/json'))
expected = '{"example": "example"}'
self.assertEqual(content, expected)
def test_render_json_with_indent(self):
renderer = renderers.JSONRenderer()
content = renderer.render({'example': 'example'}, MediaType('application/json; indent=4'))
expected = '{\n "example": "example"\n}'
self.assertEqual(content, expected)
def test_render_browsable_encoding(self):
app = FlaskAPI(__name__)
@app.route('/_love', methods=['GET'])
def love():
return {"test": "I <3 Python"}
with app.test_client() as client:
response = client.get('/_love',
headers={"Accept": "text/html"})
html = str(response.get_data())
            self.assertTrue('I &lt;3 Python' in html)
self.assertTrue('<h1>Love</h1>' in html)
self.assertTrue('/_love' in html)
def test_render_browsable_linking(self):
app = FlaskAPI(__name__)
@app.route('/_happiness', methods=['GET'])
def happiness():
return {"url": "http://example.org",
"a tag": "<br />"}
with app.test_client() as client:
response = client.get('/_happiness',
headers={"Accept": "text/html"})
html = str(response.get_data())
self.assertTrue('<a href="http://example.org">http://example.org</a>' in html)
            self.assertTrue('&lt;br /&gt;' in html)
self.assertTrue('<h1>Happiness</h1>' in html)
self.assertTrue('/_happiness' in html)
def test_renderer_negotiation_not_implemented(self):
renderer = renderers.BaseRenderer()
with self.assertRaises(NotImplementedError) as context:
renderer.render(None, None)
msg = str(context.exception)
expected = '`render()` method must be implemented for class "BaseRenderer"'
self.assertEqual(msg, expected)
class OverrideParserSettings(unittest.TestCase):
def setUp(self):
class CustomRenderer1(renderers.BaseRenderer):
media_type = 'application/example1'
def render(self, data, media_type, **options):
return 'custom renderer 1'
class CustomRenderer2(renderers.BaseRenderer):
media_type = 'application/example2'
def render(self, data, media_type, **options):
return 'custom renderer 2'
app = FlaskAPI(__name__)
app.config['DEFAULT_RENDERERS'] = [CustomRenderer1]
app.config['PROPAGATE_EXCEPTIONS'] = True
@app.route('/custom_renderer_1/', methods=['GET'])
def custom_renderer_1():
return {'data': 'example'}
@app.route('/custom_renderer_2/', methods=['GET'])
@set_renderers([CustomRenderer2])
def custom_renderer_2():
return {'data': 'example'}
@app.route('/custom_renderer_2_as_args/', methods=['GET'])
@set_renderers(CustomRenderer2)
def custom_renderer_2_as_args():
return {'data': 'example'}
self.app = app
def test_overridden_parsers_with_settings(self):
with self.app.test_client() as client:
response = client.get('/custom_renderer_1/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.headers['Content-Type'], 'application/example1')
data = response.get_data().decode('utf8')
self.assertEqual(data, "custom renderer 1")
def test_overridden_parsers_with_decorator(self):
with self.app.test_client() as client:
data = {'example': 'example'}
response = client.get('/custom_renderer_2/', data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.headers['Content-Type'], 'application/example2')
data = response.get_data().decode('utf8')
self.assertEqual(data, "custom renderer 2")
def test_overridden_parsers_with_decorator_as_args(self):
with self.app.test_client() as client:
data = {'example': 'example'}
response = client.get('/custom_renderer_2_as_args/', data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.headers['Content-Type'], 'application/example2')
data = response.get_data().decode('utf8')
self.assertEqual(data, "custom renderer 2")
| 40.532787 | 98 | 0.621436 |
79419f02caaf1ddb43da666e199368f7b8855af7 | 1,135 | py | Python | blog/2019-09-29/main.py | wrn14897/myBlog | 003db7c050456bf9690847a98fb513f453a2debb | [
"MIT"
] | null | null | null | blog/2019-09-29/main.py | wrn14897/myBlog | 003db7c050456bf9690847a98fb513f453a2debb | [
"MIT"
] | 9 | 2020-03-05T07:29:04.000Z | 2022-02-27T22:34:44.000Z | blog/2019-09-29/main.py | wrn14897/myBlog | 003db7c050456bf9690847a98fb513f453a2debb | [
"MIT"
] | null | null | null | import sys
import argparse
from requests_futures.sessions import FuturesSession
def pre_response_hook(n):
def response_hook(resp, *args, **kwargs):
print('Got response from %sth request' % n)
return response_hook
def main():
parser = argparse.ArgumentParser(description='Extremely simple DoS program')
parser.add_argument('--workers', '-w', default=1, help='number of workers')
parser.add_argument('--url', '-u', help='target URL')
parser.add_argument('--number', '-n', default=1, help='number of requests')
args = parser.parse_args()
if args.url:
num_of_workers = int(args.workers)
num_of_requests = int(args.number)
url = args.url
print('Number of workers %d' % num_of_workers)
print('Number of requests %d' % num_of_requests)
print('Target URL: %s' % url)
session = FuturesSession(max_workers=num_of_workers)
print('Attacking...')
responses = (session.get('https://chartmetric.com', hooks={
'response': pre_response_hook(i+1)
}) for i in range(num_of_requests))
for response in responses:
response.result()
if __name__ == '__main__':
main()
| 32.428571 | 78 | 0.689868 |
79419f1f0594ed58873af99a204a00242fe89850 | 2,854 | py | Python | Challenge2/geru_quotes/geru_quotes/views/quotes.py | zeciola/geru_challege | f4bb64e7c640bd707b1dc08239b68730358ed7c7 | [
"MIT"
] | null | null | null | Challenge2/geru_quotes/geru_quotes/views/quotes.py | zeciola/geru_challege | f4bb64e7c640bd707b1dc08239b68730358ed7c7 | [
"MIT"
] | 3 | 2019-12-26T17:31:51.000Z | 2022-03-21T22:17:41.000Z | Challenge2/geru_quotes/geru_quotes/views/quotes.py | zeciola/geru_challege | f4bb64e7c640bd707b1dc08239b68730358ed7c7 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import datetime
from geru_quotes.models import UserSession
from geru_quotes.utils.quotes_api_lib import Quote
from pyramid.request import Request
from pyramid.view import view_config, view_defaults
from random import randrange
from requests import Session
@view_config(route_name='home', renderer='../templates/mytemplate.jinja2')
def my_view(request):
return {'author': 'José Ricardo Ciola Bricio'}
@view_defaults(renderer='templates/quotes.jinja2')
@dataclass
class QuotesViews:
request: Request
def register_session(self, request: Request, session: Session):
user_session = UserSession()
user_session.session_date = datetime.now()
user_session.session_path = session.request.path
user_session.session_browser = request.user_agent
user_session.session_timestamp = session.created
user_session.session_pdtb_id = session.request.pdtb_id
request.dbsession.add(user_session)
request.dbsession.flush()
@view_config(route_name='quotes')
def show_quotes(self) -> dict:
try:
q = Quote()
session = self.request.session
session['origin_session'] = str(datetime.now())
self.register_session(self.request, session)
return q.get_quotes()
except Exception as e:
return dict(error=e)
@view_config(route_name='quotes_number')
def show_quote(self) -> dict:
try:
q = Quote()
number = self.request.matchdict['number']
session = self.request.session
session['origin_session'] = str(datetime.now())
self.register_session(self.request, session)
return q.get_quote(number)
except Exception as e:
return dict(error=e)
@view_config(route_name='quotes_random')
def show_quote_random(self) -> dict:
try:
q = Quote()
data = q.get_quotes()
number = randrange(0, len(data['quotes']))
session = self.request.session
session['origin_session'] = str(datetime.now())
self.register_session(self.request, session)
return q.get_quote(number)
except Exception as e:
return dict(error=e)
@view_defaults(renderer='templates/quote.jinja2')
@dataclass
class QuoteView:
request: str
@view_config(route_name='quote')
def show_quote(self) -> dict:
try:
q = Quote()
number = self.request.matchdict['number']
session = self.request.session
session['origin_session'] = str(datetime.now())
self.register_session(self.request, session)
return q.get_quote(number)
except Exception as e:
return dict(error=e)
| 26.425926 | 74 | 0.642957 |
7941a088092384124b8e8df8eb7605f4c748ca5a | 1,113 | py | Python | account/forms.py | irzaip/digiwakaf.id | 4b9a476f3079cdef0733e24da212d5d2c314fe7f | [
"MIT"
] | 1 | 2020-11-21T04:25:04.000Z | 2020-11-21T04:25:04.000Z | account/forms.py | irzaip/digiwakaf.id | 4b9a476f3079cdef0733e24da212d5d2c314fe7f | [
"MIT"
] | null | null | null | account/forms.py | irzaip/digiwakaf.id | 4b9a476f3079cdef0733e24da212d5d2c314fe7f | [
"MIT"
] | null | null | null | from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Customer, Asset
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = [ 'first_name','last_name','username', 'email', 'password1', 'password2']
labels = { 'first_name' : "Nama depan",
'email': 'Alamat Surel',
'last_name': 'Nama Belakang',
'username': 'Username',
'password2': 'Konfirmasi Password'}
help_texts = {'first_name': "Jangan gitu yaa", 'password1': ""}
class AssetForm(forms.ModelForm):
class Meta:
model = Asset
fields = '__all__'
exclude = ['downloadable', 'status']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Save person'))
| 31.8 | 90 | 0.639712 |
7941a0b188d37649d5fcccddaa019ac4d29d00c2 | 1,249 | py | Python | test/test_result_top_files.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_result_top_files.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_result_top_files.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.result_top_files import ResultTopFiles
class TestResultTopFiles(unittest.TestCase):
""" ResultTopFiles unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testResultTopFiles(self):
"""
Test ResultTopFiles
"""
model = swagger_client.models.result_top_files.ResultTopFiles()
if __name__ == '__main__':
unittest.main() | 25.489796 | 75 | 0.730184 |
7941a20ecf7c680b17902328f0e15c4f7e189abc | 1,181 | py | Python | Task2E.py | mb2403/IA_computing_112 | eaa82ae79091b5a3df695878e3d77b2edf13dee9 | [
"MIT"
] | null | null | null | Task2E.py | mb2403/IA_computing_112 | eaa82ae79091b5a3df695878e3d77b2edf13dee9 | [
"MIT"
] | null | null | null | Task2E.py | mb2403/IA_computing_112 | eaa82ae79091b5a3df695878e3d77b2edf13dee9 | [
"MIT"
] | 1 | 2022-01-20T16:53:57.000Z | 2022-01-20T16:53:57.000Z | # Max Bowler/Lewis Clark Jan/Feb 2022
# Task 2E
#
#%%
import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.analysis import polyfit
from floodsystem.plot import plot_water_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
import numpy as np
import matplotlib.pyplot as plt
def run():
stations = build_station_list()
update_water_levels(stations)
    highest_relative_level_stations = stations_highest_rel_level(stations, 6)  # choose the stations with the highest relative water level
for item in highest_relative_level_stations:
station = item[0]
        dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=2))  # fetches dates and levels using the datafetcher
#print (levels)
#print (dates) #make sure that it prints exactly 5!!
plot_water_levels(station, dates, levels)
if __name__ == "__main__":
print("*** Task 2E: CUED Part IA Flood Warning System ***")
run() | 38.096774 | 142 | 0.69348 |
7941a273f5e7cf5c2e73d5d5a71e2680916f0456 | 1,451 | py | Python | python3/plesk_api_client.py | JhonathanH/api-plesk | 865ae38a87b20a556cca8f4eea03cc0244acca91 | [
"Apache-2.0"
] | 75 | 2015-01-06T13:29:43.000Z | 2021-11-21T19:57:07.000Z | python3/plesk_api_client.py | JhonathanH/api-plesk | 865ae38a87b20a556cca8f4eea03cc0244acca91 | [
"Apache-2.0"
] | 4 | 2015-06-01T06:53:19.000Z | 2020-03-18T13:27:36.000Z | python3/plesk_api_client.py | JhonathanH/api-plesk | 865ae38a87b20a556cca8f4eea03cc0244acca91 | [
"Apache-2.0"
] | 61 | 2015-01-22T16:20:33.000Z | 2021-08-10T11:55:59.000Z | # Copyright 1999-2016. Parallels IP Holdings GmbH. All Rights Reserved.
import http.client
import ssl
class PleskApiClient:
def __init__(self, host, port = 8443, protocol = 'https', ssl_unverified = False):
self.host = host
self.port = port
self.protocol = protocol
self.secret_key = None
self.ssl_unverified = ssl_unverified
def set_credentials(self, login, password):
self.login = login
self.password = password
def set_secret_key(self, secret_key):
self.secret_key = secret_key
def request(self, request):
headers = {}
headers["Content-type"] = "text/xml"
headers["HTTP_PRETTY_PRINT"] = "TRUE"
if self.secret_key:
headers["KEY"] = self.secret_key
else:
headers["HTTP_AUTH_LOGIN"] = self.login
headers["HTTP_AUTH_PASSWD"] = self.password
if 'https' == self.protocol:
if self.ssl_unverified:
conn = http.client.HTTPSConnection(self.host, self.port, context=ssl._create_unverified_context())
else:
conn = http.client.HTTPSConnection(self.host, self.port)
else:
conn = http.client.HTTPConnection(self.host, self.port)
conn.request("POST", "/enterprise/control/agent.php", request, headers)
response = conn.getresponse()
data = response.read()
return data.decode("utf-8")
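# Example usage (illustrative sketch only; the host, credentials and request body below are placeholders):
#   client = PleskApiClient('plesk.example.com')
#   client.set_credentials('admin', 'secret')
#   print(client.request('<packet><server><get><gen_info/></get></server></packet>'))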
| 32.244444 | 114 | 0.619573 |
7941a29531246e3ae14311d065277d6a6b5dd9f5 | 307 | py | Python | Image_Processing/webcam.py | VDHARV/self-driving-car-rasberrypi | 45e70ce97f00471d38aca87dc94dbae032823f24 | [
"MIT"
] | null | null | null | Image_Processing/webcam.py | VDHARV/self-driving-car-rasberrypi | 45e70ce97f00471d38aca87dc94dbae032823f24 | [
"MIT"
] | null | null | null | Image_Processing/webcam.py | VDHARV/self-driving-car-rasberrypi | 45e70ce97f00471d38aca87dc94dbae032823f24 | [
"MIT"
] | null | null | null | import cv2
cap = cv2.VideoCapture(0)
def getImg(display = False, size = [480, 240]):
_, img = cap.read()
img = cv2.resize(img, (size[0], size[1]))
    if display:
        cv2.imshow("Img", img)
        cv2.waitKey(1)  # give the HighGUI event loop a tick so the preview window actually refreshes
return img
if __name__ == "__main__":
while True:
img = getImg(True)
| 19.1875 | 47 | 0.547231 |
7941a4034be8a6ac009c5b4caf27cfa9b641e5bc | 10,775 | py | Python | back-end/threats_monitoring/phishtank_summer.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | back-end/threats_monitoring/phishtank_summer.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | back-end/threats_monitoring/phishtank_summer.py | tzamalisp/saint-open-source-tool-for-cyberthreats-monitoring | c30e6da5358555d06413541b6d3893c62a475368 | [
"MIT"
] | null | null | null | import time
from datetime import datetime
import itertools
from pymongo import MongoClient
from downloader import Downloader
from descriptive_analysis import DescriptiveAnalysis
from export_collection_data import ExportCollectionData
# scraping libraries
from bs4 import BeautifulSoup
from lxml.html import fromstring
ts = datetime.utcnow().timestamp()
valueDt = datetime.fromtimestamp(ts)
dateTimeMongo = valueDt.strftime('%Y-%m-%d %H:%M:%S')
dateTimeMongoUTC = datetime.utcnow()
# -------------------------------------------------------------------------------------------------------------------- #
def connect_to_mongodb():
# connect to database
connection = MongoClient('XXX.XXX.XXX.XXX', 27017)
db = connection.admin
db.authenticate('xxxxxx', 'xxxXXXxxxXX')
return db
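# NOTE: the IP address and credentials above are masked placeholders and must be replaced
# with the real MongoDB host and account before this module can connect.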
# -------------------------------------------------------------------------------------------------------------------- #
def getLinks(html):
""" This function downloads a web page and discovers all it's links
@param
html (string) webpage 's html code
@return
htmlLinks (list) contains all the discovers links
"""
soup = BeautifulSoup(html, 'lxml')
possible_links = soup.find_all('a')
htmlLinks = []
for link in possible_links:
if link.has_attr('href'):
if link.attrs['href'].startswith("phish_detail.php?phish_id="):
linkDefault = 'https://www.phishtank.com/'
linkConnect = '{}{}'.format(linkDefault, link.attrs['href'])
htmlLinks.append(linkConnect)
#print(link.attrs['href'])
return htmlLinks
# -------------------------------------------------------------------------------------------------------------------- #
def getContentItemID(url, downloader):
""" This function receives the url of a specific phish page (ID), then finds and returns it's http host link
@param
url (str) url page of a phish attack
@return
contentFromList (str) the host page of phish attack
"""
html = downloader(url)
# parse html
soup = BeautifulSoup(html, 'lxml')
# studying the page revealed that the phish url is in bold
valuesList = soup.find_all('b')
if valuesList[1] != '':
contentFromList = valuesList[1].text
else:
contentFromList = None
return contentFromList
# -------------------------------------------------------------------------------------------------------------------- #
def crawl_site(db, url, max_errors=5):
""" This is the main function for crawling Phishtank website
@param
db
url
max_errors
@return:
"""
num_errors = 0
listBadLinks = []
seenListBadLinks = set(listBadLinks)
idIDs = []
seenidIDs = set(idIDs)
dictlistMongo = []
downloader = Downloader(delay=1, user_agent='giorgos93', cache={})
for page in itertools.count(0):
pg_url = url.format(page)
# prepare Downloader object
# download a page
html = downloader(pg_url)
# parse page
soup = BeautifulSoup(html, 'lxml')
td = soup.find_all(attrs={'class': 'value'})
if not td:
print('no tables!')
num_errors += 1
if num_errors == max_errors:
# reached max number of errors, so exit
break
elif html is None:
num_errors += 1
if num_errors == max_errors:
# reached max number of errors, so exit
break
else:
num_errors = 0
# success - can scrape the result
# CSS Selectors - XPATH
print()
print('Content:')
listGetContentIDs = getLinks(html)
# print('List with ID links')
# print(listGetContentIDs)
print()
for itemList in listGetContentIDs:
if itemList not in seenListBadLinks:
seenListBadLinks.add(itemList)
listBadLinks.append(getContentItemID(itemList, downloader))
tree = fromstring(html)
td = tree.cssselect('td.value')
counter = 0
for i in range(0, len(td), 5):
# id
id = td[i].text_content()
id = int(id)
idCheck = db.threats.phishtank.find_one({"_id": id})
# print(idCheck)
if idCheck is None:
print('Not duplicate key found! Continue..')
else:
print('Duplicate key value found:', idCheck['_id'])
# datetime
date = td[i + 1].text_content()
date2 = date.split()
del date2[0:2]
dataClean = date2[1]
s1 = []
for s in dataClean:
if s.isdigit():
s1.append(s)
dataClean2 = ''.join(s1)
date2[1] = dataClean2
dataCleanFinal = ' '.join(date2)
print(dataCleanFinal)
try:
# valid_date = time.strptime(date, '%m/%d/%Y')
datetimeObjectUTC = datetime.strptime(dataCleanFinal, '%b %d %Y %I:%M %p')
print('datetimeObjectUTC:', datetimeObjectUTC)
                except ValueError:
                    print('Invalid date!')
                    continue  # without a parsed date this entry cannot be stored, so skip it
datetimeUTC = datetimeObjectUTC.strftime('%Y-%m-%d %H:%M:%S')
print('datetimeUTC:', datetimeUTC)
timestampUTC = int(time.mktime(datetimeObjectUTC.timetuple()))
# CTI datetime
datetimeObjectUTCCTI = dateTimeMongoUTC
datetimeUTCCTI = datetimeObjectUTCCTI.strftime('%Y-%m-%d %H:%M:%S')
# CTI timestamp
timestampUTCCTI = ts
# author
submitted = td[i + 2].text_content()
if submitted != '':
try:
submitted2 = submitted.split()
submittedFinal = submitted2[1]
except IndexError as e:
print("Error in crawl_site(), submittedFinal: ", e)
submittedFinal = None
else:
submittedFinal = None
# valid
valid = td[i + 3].text_content()
if valid == '':
valid = None
# online
online = td[i + 4].text_content()
if online == '':
online = None
# print(datetimeUTC)
dictionary = {}
if id not in seenidIDs:
dictionary['_id'] = id
dictionary['URL'] = listBadLinks[counter]
dictionary['Submitted-by'] = submittedFinal
dictionary['Valid'] = valid
dictionary['Online'] = online
dictionary['TimestampUTC'] = timestampUTC
dictionary["mongoDate"] = datetimeObjectUTC
dictionary['DatetimeUTC'] = datetimeUTC
dictionary['TimestampUTC-CTI'] = timestampUTCCTI
dictionary['mongoDate-CTI'] = datetimeObjectUTCCTI
dictionary['DatetimeUTC-CTI'] = datetimeUTCCTI
dictionary['Entity-type'] = 'URL'
dictionary['Category'] = 'Phishing'
seenidIDs.add(id)
idIDs.append(id)
dictlistMongo.append(dictionary)
counter += 1
# TODO: delete time.sleep()
# time.sleep(0.5)
print('Length of list IDs:', len(seenidIDs))
print('Length of Mongo Dictionary:', len(dictlistMongo))
print('next')
print()
print()
# time.sleep(0.5)
# drop collection if not empty
if db.threats.phishtank.count() != 0:
db.threats.phishtank.drop()
print('Database reset')
else:
print('Database is empty! Starting storing data..')
for dictionaryMongo in dictlistMongo:
print(dictionaryMongo)
# handle to web based attacks (1) collection
# phishtank = db.threats.phishtank
db.threats.phishtank.insert(dictionaryMongo)
print()
print("Data inserted successfully to MongoDB")
print()
print()
print('The End!')
# ---------------------------------------------------------------------------------------------------------------------#
if __name__ == "__main__":
print("Report on", datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), '\n')
# parsing {0} to the string for iteration
indicator = 'https://www.phishtank.com/phish_search.php?page={0}&active=y&verified=u'
# ''' Crawl Phishtank'''
db = connect_to_mongodb()
crawl_site(db, indicator)
""" Set Path Of Data Exportation """
# to run on server
server_path = '/var/www/html/saint/indicators2018/phishing/'
# to run locally
local_path = ""
""" Descriptive Analysis and Result Exportation"""
analysis = DescriptiveAnalysis(collection=db.threats.phishtank, path=server_path)
analysis(query={}, projection={"_id": 0})
# returns a pandas data frame
data_frame = analysis.time_series_analysis('mongoDate')
# store analysis results
analysis.data_frame_to_csv(data_frame, "perdayTimeSeriesPhishingCurrentInstance")
analysis.data_frame_to_json(data_frame, "perdayTimeSeriesPhishingCurrentInstance")
''' Export current MongoDB collection instance '''
# exploitDataBase = ExportCollectionData(collection=db.threats.phishtank, path=server_path)
# exploitDataBase(query={}, projection={"_id": 0, "mongoDate": 0, "mongoDate-CTI": 0})
# exploitDataBase.export_collection_to_json("dataset-phishing-current-instance")
#
# csv_header = ["URL", "Submitted-by", "Valid", "Online", "DatetimeUTC", "TimestampUTC",
# "DatetimeUTC-CTI", "TimestampUTC-CTI", "Entity-type", "Category"]
# exploitDataBase.export_collection_to_csv("dataset-phishing-current-instance", csv_header)
| 35.212418 | 121 | 0.507749 |
7941a5e2b49846a8fceb352fb67332866b029456 | 2,905 | py | Python | airflow/contrib/operators/file_to_gcs.py | shuva10v/airflow | a6daeb544e815fe350a96d24ae3bb14aee4079a7 | [
"Apache-2.0"
] | 3 | 2019-10-03T21:38:59.000Z | 2019-10-04T00:39:03.000Z | airflow/contrib/operators/file_to_gcs.py | shuva10v/airflow | a6daeb544e815fe350a96d24ae3bb14aee4079a7 | [
"Apache-2.0"
] | 7 | 2019-03-27T07:58:14.000Z | 2020-02-12T17:42:33.000Z | airflow/contrib/operators/file_to_gcs.py | upjohnc/airflow-upjohn-k8s | caadbc1618d73e054de99138b0892cea3a9327c4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5 | 2017-06-19T19:55:47.000Z | 2020-10-10T00:49:20.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class FileToGoogleCloudStorageOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage.
Optionally can compress the file for upload.
:param src: Path to the local file. (templated)
:type src: str
:param dst: Destination path within the specified bucket. (templated)
:type dst: str
:param bucket: The bucket to upload to. (templated)
:type bucket: str
:param google_cloud_storage_conn_id: The Airflow connection ID to upload with
:type google_cloud_storage_conn_id: str
:param mime_type: The mime-type string
:type mime_type: str
:param delegate_to: The account to impersonate, if any
:type delegate_to: str
:param gzip: Allows for file to be compressed and uploaded as gzip
:type gzip: bool
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
google_cloud_storage_conn_id='google_cloud_default',
mime_type='application/octet-stream',
delegate_to=None,
gzip=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
self.gzip = gzip
def execute(self, context):
"""
Uploads the file to Google cloud storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket_name=self.bucket,
object_name=self.dst,
mime_type=self.mime_type,
filename=self.src,
gzip=self.gzip,
)
| 35 | 81 | 0.66747 |
7941a6f6deced3c613dbbc7d9731acd582d448c1 | 2,503 | py | Python | main.py | eduardorezaghi/jogo-da-velha-fatec | 6068c4c9e55a120b90397ecdba78e3d3764e285b | [
"MIT"
] | null | null | null | main.py | eduardorezaghi/jogo-da-velha-fatec | 6068c4c9e55a120b90397ecdba78e3d3764e285b | [
"MIT"
] | null | null | null | main.py | eduardorezaghi/jogo-da-velha-fatec | 6068c4c9e55a120b90397ecdba78e3d3764e285b | [
"MIT"
] | null | null | null | tabuleiro = [' ', ' ', ' ',
' ', ' ', ' ',
' ', ' ', ' '
]
def coordenada_esta_no_limite(c, texto):
while (c < 0) or (c > 2):
c = int(input('Digite o valor da '+ str(texto) + ': ' ))
return c
def entrada_do_usuario(): # This function represents the user's input.
i = int(input('Digite o valor da linha: '))
i = coordenada_esta_no_limite(i, 'linha')
j = int(input('Digite o valor da coluna: '))
j = coordenada_esta_no_limite(j, 'coluna')
return [i, j]
def fazer_jogada(posicao, tabuleiro): # This function represents the user's move.
if posicao == [0,0]:
posicao = 0
if posicao == [0,1]:
posicao = 1
if posicao == [0,2]:
posicao = 2
if posicao == [1,0]:
posicao = 3
if posicao == [1,1]:
posicao = 4
if posicao == [1,2]:
posicao = 5
if posicao == [2,0]:
posicao = 6
if posicao == [2,1]:
posicao = 7
if posicao == [2,2]:
posicao = 8
tabuleiro[posicao] = "X"
return tabuleiro
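# Note: the chain of comparisons above maps a (row, column) pair to a flat board index;
# the same mapping could be computed directly as index = row*3 + column.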
def mostrar_gui(tabuleiro): # This function displays the board so the user can make their moves
print(' ' + ' | '.join(tabuleiro[0:3]))
print('+'.join(['---', '---', '---']))
print(' ' + ' | '.join(tabuleiro[3:6]))
print('+'.join(['---', '---', '---']))
print(' ' + ' | '.join(tabuleiro[6:9]))
def detectar_resultado(tabuleiro): # This function performs the detection that determines the result of the match.
    # Vertical lines (columns)
    for marcador in range(0, 3, 1):
        if tabuleiro[marcador] == tabuleiro[marcador+3] == tabuleiro[marcador+6]:
            if tabuleiro[marcador] == 'X' or tabuleiro[marcador] == 'O':
                return tabuleiro[marcador]
    # Horizontal lines (rows)
    for horizontal in range(0, 9, 3):
        if tabuleiro[horizontal] == tabuleiro[horizontal+1] == tabuleiro[horizontal+2]:
            if tabuleiro[horizontal] == 'X' or tabuleiro[horizontal] == 'O':
                return tabuleiro[horizontal]
    # Diagonal lines
    if tabuleiro[0] == tabuleiro[4] == tabuleiro[8]:
        if tabuleiro[0] == 'X' or tabuleiro[0] == 'O':
            return tabuleiro[0]
    if tabuleiro[2] == tabuleiro[4] == tabuleiro[6]:
        if tabuleiro[2] == 'X' or tabuleiro[2] == 'O':
            return tabuleiro[2]
    return False
# This stage determines the moves that will be made over the course of the current game.
fim_de_jogo = False
while fim_de_jogo == False:
jogada = entrada_do_usuario()
tabuleiro = fazer_jogada(jogada, tabuleiro)
fim_de_jogo = detectar_resultado (tabuleiro)
mostrar_gui(tabuleiro)
print("Jogador " + fim_de_jogo + " ganhou.") | 27.811111 | 124 | 0.63324 |
7941a7a7cebe0697e0855f63e2064087cd758fc8 | 700 | py | Python | python/image_moderation_batch_aksk_demo.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python/image_moderation_batch_aksk_demo.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python/image_moderation_batch_aksk_demo.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
from ais_sdk.image_moderation_batch import image_content_batch_aksk
from ais_sdk.utils import init_global_env
if __name__ == '__main__':
#
    # access the image moderation batch-job service, posting data authenticated with AK/SK
#
app_key = '*************'
app_secret = '************'
init_global_env(region='cn-north-1')
demo_data_url1 = 'https://ais-sample-data.obs.cn-north-1.myhuaweicloud.com/terrorism.jpg'
demo_data_url2 = 'https://ais-sample-data.obs.cn-north-1.myhuaweicloud.com/antiporn.jpg'
    # call the interface using the demo URLs
result = image_content_batch_aksk(app_key, app_secret, [demo_data_url1, demo_data_url2], ['politics', 'terrorism'],0)
print result
| 36.842105 | 121 | 0.695714 |
7941a8963b1ab448f1899f6e79af1c1227f0a562 | 1,705 | py | Python | mayan/apps/dynamic_search/api_views.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | 1 | 2020-07-15T02:56:02.000Z | 2020-07-15T02:56:02.000Z | mayan/apps/dynamic_search/api_views.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | null | null | null | mayan/apps/dynamic_search/api_views.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 2 | 2020-02-24T21:02:31.000Z | 2021-01-05T23:52:01.000Z | from __future__ import unicode_literals
from rest_framework import generics
from rest_framework.exceptions import ParseError
from rest_api.filters import MayanObjectPermissionsFilter
from .classes import SearchModel
from .filters import RecentSearchUserFilter
from .models import RecentSearch
from .serializers import RecentSearchSerializer
class APIRecentSearchListView(generics.ListAPIView):
"""
    Returns a list of all the recent searches for the logged-in user.
"""
filter_backends = (RecentSearchUserFilter,)
queryset = RecentSearch.objects.all()
serializer_class = RecentSearchSerializer
class APIRecentSearchView(generics.RetrieveAPIView):
"""
Returns the selected recent search details.
"""
filter_backends = (RecentSearchUserFilter,)
queryset = RecentSearch.objects.all()
serializer_class = RecentSearchSerializer
class APISearchView(generics.ListAPIView):
"""
    Perform a search operation
q -- Term that will be used for the search.
"""
filter_backends = (MayanObjectPermissionsFilter,)
def get_queryset(self):
search_class = self.get_search_class()
if search_class.permission:
self.mayan_object_permissions = {'GET': (search_class.permission,)}
try:
queryset, ids, timedelta = search_class.search(
query_string=self.request.GET, user=self.request.user
)
except Exception as exception:
raise ParseError(unicode(exception))
return queryset
def get_search_class(self):
return SearchModel.get('documents.Document')
def get_serializer_class(self):
return self.get_search_class().serializer
| 27.95082 | 79 | 0.72434 |
7941a931c9c3e300e63733ef472a965cc488d5fe | 5,862 | py | Python | sdk/python/pulumi_azure_native/botservice/list_bot_connection_with_secrets.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/botservice/list_bot_connection_with_secrets.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/botservice/list_bot_connection_with_secrets.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListBotConnectionWithSecretsResult',
'AwaitableListBotConnectionWithSecretsResult',
'list_bot_connection_with_secrets',
]
@pulumi.output_type
class ListBotConnectionWithSecretsResult:
"""
Bot channel resource definition
"""
def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Entity Tag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Required. Gets or sets the Kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ConnectionSettingPropertiesResponse':
"""
The set of properties specific to bot channel resource
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Gets or sets the SKU of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableListBotConnectionWithSecretsResult(ListBotConnectionWithSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListBotConnectionWithSecretsResult(
etag=self.etag,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def list_bot_connection_with_secrets(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListBotConnectionWithSecretsResult:
"""
Bot channel resource definition
API Version: 2021-03-01.
:param str connection_name: The name of the Bot Service Connection Setting resource.
:param str resource_group_name: The name of the Bot resource group in the user subscription.
:param str resource_name: The name of the Bot resource.
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:botservice:listBotConnectionWithSecrets', __args__, opts=opts, typ=ListBotConnectionWithSecretsResult).value
return AwaitableListBotConnectionWithSecretsResult(
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
| 33.497143 | 158 | 0.623166 |
7941a978116e90e542d0d8ea22dfb4dbb7514103 | 3,118 | py | Python | src/primaires/scripting/commandes/scripting/alerte_liste.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/scripting/commandes/scripting/alerte_liste.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/scripting/commandes/scripting/alerte_liste.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'scripting alerte liste'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.format.fonctions import echapper_accolades
class PrmListe(Parametre):
"""Commande 'scripting alerte liste'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "liste", "list")
self.aide_courte = "affiche la liste des alertes"
self.aide_longue = \
"Affiche la liste des alertes existantes. Notez " \
"que les alertes, lues ou non, sont affichées. " \
"Si une erreur est corrigée, utilisez le paramètre " \
"%scripting:alerte:resoudre%."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
alertes = []
for alerte in sorted(importeur.scripting.alertes.values(),
key=lambda alerte: alerte.no):
message = alerte.message
if len(message) > 30:
message = message[:30] + "..."
message = echapper_accolades(message)
msg = str(alerte.no).rjust(3) + " "
msg += alerte.objet + "[" + alerte.evenement + "]"
msg += " " + str(alerte.date.date())
msg += " " + message
alertes.append(msg)
if alertes:
personnage << "Liste des alertes non résolues :\n\n " + \
"\n ".join(alertes)
else:
personnage << "|att|Aucune alerte non résolue n'est conservée.|ff|"
| 43.915493 | 79 | 0.680564 |
7941a9e9dcc0a9dcf2e6950ae2d1b157894063ce | 591 | py | Python | domain/preprocessing/preprocessor.py | nazarimilad/open-intelligence-backend | 9b38db0f52ce78abeb83601f05d1987ad6a2b0e8 | [
"MIT"
] | 39 | 2020-07-01T03:00:49.000Z | 2022-02-17T07:39:49.000Z | domain/preprocessing/preprocessor.py | nazarimilad/open-intelligence-backend | 9b38db0f52ce78abeb83601f05d1987ad6a2b0e8 | [
"MIT"
] | 2 | 2020-08-12T04:31:10.000Z | 2020-09-09T14:27:27.000Z | domain/preprocessing/preprocessor.py | nazarimilad/open-intelligence-backend | 9b38db0f52ce78abeb83601f05d1987ad6a2b0e8 | [
"MIT"
] | 5 | 2020-11-16T09:45:16.000Z | 2022-02-17T07:39:51.000Z | import cv2
import numpy as np
def _add_padding(bordersize, image):
row, col = image.shape[:2]
bottom = image[row-2:row, 0:col]
mean = cv2.mean(bottom)[0]
return cv2.copyMakeBorder(
image,
top=bordersize,
bottom=bordersize,
left=bordersize,
right=bordersize,
borderType=cv2.BORDER_CONSTANT,
value=[mean, mean, mean]
)
def preprocess(image_path, write_directory):
image = cv2.imread(image_path)
image = _add_padding(bordersize=300, image=image)
cv2.imwrite(write_directory + "/preprocessed.png", image)
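# Example usage (illustrative; the paths are placeholders):
#   preprocess("input/sample.png", "output_dir")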
| 26.863636 | 61 | 0.658206 |
7941a9ea3dfaa17a98e91aee37b2af429e186f7b | 1,044 | py | Python | opmodaq/opmodaq/devices/plot.py | MenloSystems/opMoDAQ | 8e35eeb028ae8001fdbebeeeb95a15b98f0fd4ba | [
"MIT"
] | null | null | null | opmodaq/opmodaq/devices/plot.py | MenloSystems/opMoDAQ | 8e35eeb028ae8001fdbebeeeb95a15b98f0fd4ba | [
"MIT"
] | null | null | null | opmodaq/opmodaq/devices/plot.py | MenloSystems/opMoDAQ | 8e35eeb028ae8001fdbebeeeb95a15b98f0fd4ba | [
"MIT"
] | null | null | null | # Windows DAQ device interface
import opmodaq.device as dev
import opmodaq.parameters as params
import opmodaq.generators as gen
import matplotlib.pyplot as plt
class PlotDeviceChannel(dev.Channel):
def __init__(self, device, channel_idx):
dev.Channel.__init__(self, device, channel_idx)
def analog_out(self,
generator: gen.SignalGenerator,
sample_range: range):
x_values = []
for x in sample_range:
x_values.append(x)
y_values = []
for y in generator.samples(self.device.sampling, sample_range):
y_values.append(y)
self.device.plot_obj.plot(x_values, y_values)
class PlotDevice(dev.Device):
def __init__(self, sampling: params.Sampling, plot_obj = plt):
self.plot_obj = plot_obj
self.sampling = sampling
dev.Device.__init__(self, board_num = 0, dev_handle = plot_obj)
def channel(self, channel_idx):
return PlotDeviceChannel(self, channel_idx)
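# Illustrative usage sketch -- the Sampling and generator constructors shown here are
# assumptions, not the real opmodaq signatures; adjust to the actual API:
#   device = PlotDevice(sampling=params.Sampling(50000))
#   device.channel(0).analog_out(gen.SignalGenerator(...), range(0, 1000))
#   plt.show()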
| 26.1 | 71 | 0.648467 |
7941aa5af5462edbfca3d4335f4726f121ab8295 | 10,354 | py | Python | openrave/sandbox/mintime/MintimeProblemGeneric.py | jdsika/TUM_HOly | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 2 | 2015-11-13T16:40:57.000Z | 2017-09-15T15:37:19.000Z | openrave/sandbox/mintime/MintimeProblemGeneric.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 1 | 2016-06-13T01:29:51.000Z | 2016-06-14T00:38:27.000Z | openrave/sandbox/mintime/MintimeProblemGeneric.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Quang-Cuong Pham <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
General class of time-optimal path parameterization problems
All new problem classes must be derived from this class
For examples, see MintimeProblemManip.py and MintimeProblemZMP.py
"""
from numpy import *
from pylab import *
import bisect
class MintimeProblemGeneric():
def __init__(self,robot,traj):
self.robot=robot
self.traj=traj
self.dim=traj.dim
self.t_step=traj.t_step
self.n_steps=traj.n_steps
self.duration=traj.duration
self.t_vect=traj.t_vect
self.q_vect=traj.q_vect
self.qd_vect=traj.qd_vect
self.qdd_vect=traj.qdd_vect
self.isset_dynamics_limits=False
self.isset_velocity_limits=False
def preprocess(self):
"""Preprocess, must be called before running the ProfileIntegrator"""
# Sample the dynamics
self.sample_dynamics()
# Compute the max velocity curve caused by accelerations limits
if(self.isset_dynamics_limits):
self.compute_maxvel_accel_curve()
self.maxvel_curve=array(self.maxvel_accel_curve)
else:
raise NameError('Second order (torques, zmp,...) limits are required')
# Compute the max velocity curve caused by velocity limits
if(self.isset_velocity_limits):
self.compute_maxvel_velocity_curve()
for i in range(self.n_steps):
self.maxvel_curve[i]=min(self.maxvel_accel_curve[i],self.maxvel_velocity_curve[i])
# Compute the switch points
self.find_tangent_disc_points()
self.find_zero_inertia_points()
self.merge_switch_points_lists()
############################ Velocity limits ##############################
def set_velocity_limits(self,limits):
self.qd_max=limits
self.isset_velocity_limits=True
def compute_maxvel_velocity_curve(self):
"""Compute the max velocity curve caused by velocity limits"""
qd_max=self.qd_max
n_steps=self.n_steps
self.maxvel_velocity_curve=ones(n_steps)*1e5
for i in range(n_steps):
qd=self.qd_vect[:,i]
for j in range(self.dim):
self.maxvel_velocity_curve[i]=min(self.maxvel_velocity_curve[i],qd_max[j]/abs(qd[j]))
def compute_maxvel_accel_curve(self):
"""Compute the max velocity curve caused by torque limits"""
self.maxvel_accel_curve=zeros(self.n_steps)
for i in range(self.n_steps):
self.maxvel_accel_curve[i]=self.maxvel_accel(self.t_vect[i])
def maxvel_velocity_interp(self,s):
"""Compute the max velocity caused by velocity limits
s -- point on the trajectory
"""
return self.linear_interpolate(s,self.maxvel_velocity_curve)
def maxvel_accel_interp(self,s):
"""Compute the max velocity caused by accel limits
s -- point on the trajectory
"""
return self.linear_interpolate(s,self.maxvel_accel_curve)
def maxvel_interp(self,s):
"""Compute the overall max velocity limits
s -- point on the trajectory
"""
return self.linear_interpolate(s,self.maxvel_curve)
############################ Prototypes ##############################
def set_dynamics_limits(self,limits):
"""Set dynamics limits"""
raise NameError('Some virtual methods need be implemented')
def sample_dynamics(self):
"""Sample the dynamics coefficients along the trajectory"""
raise NameError('Some virtual methods need be implemented')
def dynamics_coefficients(self,s):
"""Compute the dynamics coefficients at a given point by interpolation
s -- point on the trajectory
"""
raise NameError('Some virtual methods need be implemented')
def accel_limits(self,s,sdot):
"""Compute the acceleration limits caused by torque limits
(s,sdot) -- point of the phase plane
"""
raise NameError('Some virtual methods need be implemented')
def maxvel_accel(self,s):
"""Compute the maximum velocity caused by torque limits
s -- point on the trajectory
"""
raise NameError('Some virtual methods need be implemented')
def find_zero_inertia_points(self):
"""Find all zero-inertia points and assign to the list self.sw_zero_inertia"""
raise NameError('Some virtual methods need be implemented')
def correct_accel_zi(self,s):
"""Compute the correct acceleration at a zero-inertia point
s -- a zero-inertia point on the trajectory
"""
raise NameError('Some virtual methods need be implemented')
############################ Switch points ##############################
# The search for tangent and discontinuity points is the same for everyone
# The search for zero-inertia points depends on the dynamics, so this is
# computed in the children classes
def find_tangent_disc_points(self):
"""Find all tangent and discontinuity points and assign to the list self.sw_tangent_disc"""
if self.n_steps<3:
self.sw_tangent_disc=[]
return
maxvel_curve=self.maxvel_curve
i=1
s=self.t_vect[i]
sdot=maxvel_curve[i]
[alpha,beta,k_alpha,k_beta]=self.accel_limits(s,sdot)
diffp=alpha/sdot-(maxvel_curve[i+1]-maxvel_curve[i])/self.t_step
i_list=[]
for i in range(1,self.n_steps-1):
# Test whether i is a discontinuity points
if abs(maxvel_curve[i+1]-maxvel_curve[i])>self.disc_thr:
if maxvel_curve[i+1]>maxvel_curve[i]:
i_list.append(i)
else:
i_list.append(i+1)
# Test whether i is a tangent points
s=self.t_vect[i]
sdot=maxvel_curve[i]
[alpha,beta,k_alpha,k_beta]=self.accel_limits(s,sdot)
diff=alpha/sdot-(maxvel_curve[i+1]-maxvel_curve[i])/self.t_step
if diff*diffp<0:
if i>2 and i<self.n_steps-3:
# A switch point cannot be a local maximum
if maxvel_curve[i-2]>sdot or maxvel_curve[i+2]>sdot:
if maxvel_curve[i]<maxvel_curve[i-1]:
i_list.append(i)
else:
i_list.append(i-1)
else:
if maxvel_curve[i]<maxvel_curve[i-1]:
i_list.append(i)
else:
i_list.append(i-1)
diffp=diff
self.sw_tangent_disc=i_list
def merge_switch_points_lists(self):
"""Find all switch points and assign to the list self.sw_s_list
by merging the zero-inertia list and tangent-disc list
"""
i_list=self.sw_zero_inertia # Zero-inertia points 'z'
i_list2=self.sw_tangent_disc # Tangent or discontinuity points 't'
# Merge the lists
type_list=['z']*len(i_list)
for x in i_list2:
if not (x in i_list):
index=bisect.bisect_left(i_list,x)
i_list.insert(index,x)
type_list.insert(index,'t')
# Find the corresponding values s and sdot
s_list=[]
sdot_list=[]
for j in range(len(i_list)):
s_list.append(self.t_vect[i_list[j]])
sdot=self.maxvel_curve[i_list[j]]
sdot_list.append(sdot)
self.sw_i_list=i_list
self.sw_type_list=type_list
self.sw_s_list=s_list
self.sw_sdot_list=sdot_list
############################ Interpolation ##############################
def linear_interpolate(self,s,value_vect,t_vect=None,elim_out=False):
        if t_vect is None:
t_vect=self.t_vect
n_steps=self.n_steps
else:
n_steps=len(t_vect)
if n_steps==0:
return 1e15
if s<t_vect[0]:
if elim_out:
return 1e15
else:
s=t_vect[0]+1e-5
if s>t_vect[n_steps-1]:
if elim_out:
return 1e15
else:
s=t_vect[n_steps-1]-1e-5
i=bisect.bisect_left(t_vect,s)
if i==0:
return value_vect[i]
r=(s-t_vect[i-1])/(t_vect[i]-t_vect[i-1])
return (1-r)*value_vect[i-1]+r*value_vect[i]
def linear_interpolate_multi(self,s,value_vect_list,t_vect=None):
        if t_vect is None:
t_vect=self.t_vect
n_steps=self.n_steps
else:
n_steps=len(t_vect)
if s<t_vect[0]: s=t_vect[0]+1e-5
if s>t_vect[n_steps-1]: s=t_vect[n_steps-1]-1e-5
i=bisect.bisect_left(t_vect,s)
if i==0:
return [k[:,i] for k in value_vect_list]
r=(s-t_vect[i-1])/(t_vect[i]-t_vect[i-1])
return [(1-r)*k[:,i-1]+r*k[:,i] for k in value_vect_list]
########################## Plotting ############################
def plot_maxvel_curves(self,h_offset=0):
plot(self.t_vect+h_offset,self.maxvel_curve,'c',linewidth=3)
plot(self.t_vect+h_offset,self.maxvel_accel_curve,'r')
if(self.isset_velocity_limits):
plot(self.t_vect+h_offset,self.maxvel_velocity_curve,'g')
plot([h_offset,h_offset],[0,1e2],'r--')
for i in range(len(self.sw_s_list)):
if self.sw_type_list[i]=='t':
plot(self.sw_s_list[i]+h_offset,self.sw_sdot_list[i],'ro')
if self.sw_type_list[i]=='z':
plot(self.sw_s_list[i]+h_offset,self.sw_sdot_list[i],'go')
| 31.280967 | 101 | 0.598319 |
7941ab0b8607898e90d08708b1c307ee8533298e | 2,326 | py | Python | deepcell/layers/__init__.py | jizhouh/deepcell-tf | 491ece59f5024d73429477ebdcb437a6e67d766b | [
"Apache-2.0"
] | 1 | 2021-03-21T13:47:35.000Z | 2021-03-21T13:47:35.000Z | deepcell/layers/__init__.py | jizhouh/deepcell-tf | 491ece59f5024d73429477ebdcb437a6e67d766b | [
"Apache-2.0"
] | null | null | null | deepcell/layers/__init__.py | jizhouh/deepcell-tf | 491ece59f5024d73429477ebdcb437a6e67d766b | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# [email protected]
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Layers"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.layers import convolutional_recurrent
from deepcell.layers import location
from deepcell.layers import normalization
from deepcell.layers import pooling
from deepcell.layers import tensor_product
from deepcell.layers import padding
from deepcell.layers import upsample
from deepcell.layers.convolutional_recurrent import ConvGRU2D
from deepcell.layers.location import Location2D
from deepcell.layers.location import Location3D
from deepcell.layers.normalization import ImageNormalization2D
from deepcell.layers.normalization import ImageNormalization3D
from deepcell.layers.pooling import DilatedMaxPool2D
from deepcell.layers.pooling import DilatedMaxPool3D
from deepcell.layers.tensor_product import TensorProduct
from deepcell.layers.padding import ReflectionPadding2D
from deepcell.layers.padding import ReflectionPadding3D
from deepcell.layers.upsample import UpsampleLike
del absolute_import
del division
del print_function
| 43.074074 | 80 | 0.797077 |
7941ab847f1e6cb68e3f6b7139a875fa77188605 | 2,360 | py | Python | packet_sniff.py | YoussefEmad99/Real-Time-Whatsapp-Controller-Bot-For-Productivity | 8399a40a36222fad62b0026795ace9fdbeac6e29 | [
"MIT"
] | null | null | null | packet_sniff.py | YoussefEmad99/Real-Time-Whatsapp-Controller-Bot-For-Productivity | 8399a40a36222fad62b0026795ace9fdbeac6e29 | [
"MIT"
] | null | null | null | packet_sniff.py | YoussefEmad99/Real-Time-Whatsapp-Controller-Bot-For-Productivity | 8399a40a36222fad62b0026795ace9fdbeac6e29 | [
"MIT"
] | null | null | null | import socket
import struct
from struct import *
import sys
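# get_mac_addr() is referenced below but was not defined in this file; a minimal
# standard implementation is sketched here so ethernet_head() can run.
def get_mac_addr(bytes_addr):
    # format the six raw MAC-address bytes as the usual AA:BB:CC:DD:EE:FF string
    return ':'.join(map('{:02x}'.format, bytes_addr)).upper()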
def ethernet_head(raw_data):
dest, src, prototype = struct.unpack('! 6s 6s H', raw_data[:14])
dest_mac = get_mac_addr(dest)
src_mac = get_mac_addr(src)
proto = socket.htons(prototype)
data = raw_data[14:]
return dest_mac, src_mac, proto, data
def get_ip(addr):
return '.'.join(map(str, addr))
def main():
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
while True:
raw_data, addr = s.recvfrom(65535)
eth = ethernet_head(raw_data)
print('\nEthernet Frame:')
print('Destination: {}, Source: {}, Protocol: {}'.format(eth[0], eth[1], eth[2]))
if eth[2] == 8:
            ipv4 = ipv4_head(eth[3])
            print('\t - ' + 'IPv4 Packet:')
            print('\t\t - ' + 'Version: {}, Header Length: {}, TTL: {},'.format(ipv4[0], ipv4[1], ipv4[2]))
            print('\t\t - ' + 'Protocol: {}, Source: {}, Target: {}'.format(ipv4[3], ipv4[4], ipv4[5]))
def tcp_head(raw_data):
(src_port, dest_port, sequence, acknowledgment, offset_reserved_flags) = struct.unpack('! H H L L H', raw_data[:14])
offset = (offset_reserved_flags >> 12) * 4
flag_urg = (offset_reserved_flags & 32) >> 5
flag_ack = (offset_reserved_flags & 16) >> 4
flag_psh = (offset_reserved_flags & 8) >> 3
flag_rst = (offset_reserved_flags & 4) >> 2
flag_syn = (offset_reserved_flags & 2) >> 1
flag_fin = offset_reserved_flags & 1
data = raw_data[offset:]
return src_port, dest_port, sequence, acknowledgment, flag_urg, flag_ack, flag_psh, flag_rst, flag_syn, flag_fin, data
def ipv4_head(raw_data):
version_header_length = raw_data[0]
version = version_header_length >> 4
header_length = (version_header_length & 15) * 4
ttl, proto, src, target = struct.unpack('! 8x B B 2x 4s 4s', raw_data[:20])
data = raw_data[header_length:]
src = get_ip(src)
target = get_ip(target)
return version, header_length, ttl, proto, src, target, data
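# The module-level code below is the Windows variant of the sniffer: it binds a raw IP
# socket to the local host and uses SIO_RCVALL to enable promiscuous capture.
# (main() above relies on AF_PACKET, which is only available on Linux, so the two code
# paths are platform specific.)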
HOST = socket.gethostbyname(socket.gethostname())
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
s.bind((HOST, 0))
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
while True:
print(s.recvfrom(65565))
#
# s.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
| 33.239437 | 122 | 0.65339 |
7941ad23d0e4598ff55148d210804698a757cf05 | 16,025 | py | Python | build/PureCloudPlatformClientV2/models/queue_conversation_event_topic_chat.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/queue_conversation_event_topic_chat.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/queue_conversation_event_topic_chat.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class QueueConversationEventTopicChat(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
QueueConversationEventTopicChat - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'state': 'str',
'id': 'str',
'provider': 'str',
'script_id': 'str',
'peer_id': 'str',
'room_id': 'str',
'avatar_image_url': 'str',
'held': 'bool',
'disconnect_type': 'str',
'start_hold_time': 'datetime',
'connected_time': 'datetime',
'disconnected_time': 'datetime',
'journey_context': 'QueueConversationEventTopicJourneyContext',
'wrapup': 'QueueConversationEventTopicWrapup',
'after_call_work': 'QueueConversationEventTopicAfterCallWork',
'after_call_work_required': 'bool',
'additional_properties': 'object'
}
self.attribute_map = {
'state': 'state',
'id': 'id',
'provider': 'provider',
'script_id': 'scriptId',
'peer_id': 'peerId',
'room_id': 'roomId',
'avatar_image_url': 'avatarImageUrl',
'held': 'held',
'disconnect_type': 'disconnectType',
'start_hold_time': 'startHoldTime',
'connected_time': 'connectedTime',
'disconnected_time': 'disconnectedTime',
'journey_context': 'journeyContext',
'wrapup': 'wrapup',
'after_call_work': 'afterCallWork',
'after_call_work_required': 'afterCallWorkRequired',
'additional_properties': 'additionalProperties'
}
self._state = None
self._id = None
self._provider = None
self._script_id = None
self._peer_id = None
self._room_id = None
self._avatar_image_url = None
self._held = None
self._disconnect_type = None
self._start_hold_time = None
self._connected_time = None
self._disconnected_time = None
self._journey_context = None
self._wrapup = None
self._after_call_work = None
self._after_call_work_required = None
self._additional_properties = None
@property
def state(self):
"""
Gets the state of this QueueConversationEventTopicChat.
:return: The state of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this QueueConversationEventTopicChat.
:param state: The state of this QueueConversationEventTopicChat.
:type: str
"""
allowed_values = ["ALERTING", "DIALING", "CONTACTING", "OFFERING", "CONNECTED", "DISCONNECTED", "TERMINATED", "NONE"]
if state.lower() not in map(str.lower, allowed_values):
# print("Invalid value for state -> " + state)
self._state = "outdated_sdk_version"
else:
self._state = state
@property
def id(self):
"""
Gets the id of this QueueConversationEventTopicChat.
:return: The id of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this QueueConversationEventTopicChat.
:param id: The id of this QueueConversationEventTopicChat.
:type: str
"""
self._id = id
@property
def provider(self):
"""
Gets the provider of this QueueConversationEventTopicChat.
:return: The provider of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._provider
@provider.setter
def provider(self, provider):
"""
Sets the provider of this QueueConversationEventTopicChat.
:param provider: The provider of this QueueConversationEventTopicChat.
:type: str
"""
self._provider = provider
@property
def script_id(self):
"""
Gets the script_id of this QueueConversationEventTopicChat.
:return: The script_id of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._script_id
@script_id.setter
def script_id(self, script_id):
"""
Sets the script_id of this QueueConversationEventTopicChat.
:param script_id: The script_id of this QueueConversationEventTopicChat.
:type: str
"""
self._script_id = script_id
@property
def peer_id(self):
"""
Gets the peer_id of this QueueConversationEventTopicChat.
:return: The peer_id of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._peer_id
@peer_id.setter
def peer_id(self, peer_id):
"""
Sets the peer_id of this QueueConversationEventTopicChat.
:param peer_id: The peer_id of this QueueConversationEventTopicChat.
:type: str
"""
self._peer_id = peer_id
@property
def room_id(self):
"""
Gets the room_id of this QueueConversationEventTopicChat.
:return: The room_id of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._room_id
@room_id.setter
def room_id(self, room_id):
"""
Sets the room_id of this QueueConversationEventTopicChat.
:param room_id: The room_id of this QueueConversationEventTopicChat.
:type: str
"""
self._room_id = room_id
@property
def avatar_image_url(self):
"""
Gets the avatar_image_url of this QueueConversationEventTopicChat.
:return: The avatar_image_url of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._avatar_image_url
@avatar_image_url.setter
def avatar_image_url(self, avatar_image_url):
"""
Sets the avatar_image_url of this QueueConversationEventTopicChat.
:param avatar_image_url: The avatar_image_url of this QueueConversationEventTopicChat.
:type: str
"""
self._avatar_image_url = avatar_image_url
@property
def held(self):
"""
Gets the held of this QueueConversationEventTopicChat.
:return: The held of this QueueConversationEventTopicChat.
:rtype: bool
"""
return self._held
@held.setter
def held(self, held):
"""
Sets the held of this QueueConversationEventTopicChat.
:param held: The held of this QueueConversationEventTopicChat.
:type: bool
"""
self._held = held
@property
def disconnect_type(self):
"""
Gets the disconnect_type of this QueueConversationEventTopicChat.
:return: The disconnect_type of this QueueConversationEventTopicChat.
:rtype: str
"""
return self._disconnect_type
@disconnect_type.setter
def disconnect_type(self, disconnect_type):
"""
Sets the disconnect_type of this QueueConversationEventTopicChat.
:param disconnect_type: The disconnect_type of this QueueConversationEventTopicChat.
:type: str
"""
allowed_values = ["ENDPOINT", "CLIENT", "SYSTEM", "TIMEOUT", "TRANSFER", "TRANSFER_CONFERENCE", "TRANSFER_CONSULT", "TRANSFER_NOANSWER", "TRANSFER_NOTAVAILABLE", "TRANSFER_FORWARD", "TRANSPORT_FAILURE", "ERROR", "PEER", "OTHER", "SPAM", "UNCALLABLE"]
if disconnect_type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for disconnect_type -> " + disconnect_type)
self._disconnect_type = "outdated_sdk_version"
else:
self._disconnect_type = disconnect_type
@property
def start_hold_time(self):
"""
Gets the start_hold_time of this QueueConversationEventTopicChat.
:return: The start_hold_time of this QueueConversationEventTopicChat.
:rtype: datetime
"""
return self._start_hold_time
@start_hold_time.setter
def start_hold_time(self, start_hold_time):
"""
Sets the start_hold_time of this QueueConversationEventTopicChat.
:param start_hold_time: The start_hold_time of this QueueConversationEventTopicChat.
:type: datetime
"""
self._start_hold_time = start_hold_time
@property
def connected_time(self):
"""
Gets the connected_time of this QueueConversationEventTopicChat.
:return: The connected_time of this QueueConversationEventTopicChat.
:rtype: datetime
"""
return self._connected_time
@connected_time.setter
def connected_time(self, connected_time):
"""
Sets the connected_time of this QueueConversationEventTopicChat.
:param connected_time: The connected_time of this QueueConversationEventTopicChat.
:type: datetime
"""
self._connected_time = connected_time
@property
def disconnected_time(self):
"""
Gets the disconnected_time of this QueueConversationEventTopicChat.
:return: The disconnected_time of this QueueConversationEventTopicChat.
:rtype: datetime
"""
return self._disconnected_time
@disconnected_time.setter
def disconnected_time(self, disconnected_time):
"""
Sets the disconnected_time of this QueueConversationEventTopicChat.
:param disconnected_time: The disconnected_time of this QueueConversationEventTopicChat.
:type: datetime
"""
self._disconnected_time = disconnected_time
@property
def journey_context(self):
"""
Gets the journey_context of this QueueConversationEventTopicChat.
:return: The journey_context of this QueueConversationEventTopicChat.
:rtype: QueueConversationEventTopicJourneyContext
"""
return self._journey_context
@journey_context.setter
def journey_context(self, journey_context):
"""
Sets the journey_context of this QueueConversationEventTopicChat.
:param journey_context: The journey_context of this QueueConversationEventTopicChat.
:type: QueueConversationEventTopicJourneyContext
"""
self._journey_context = journey_context
@property
def wrapup(self):
"""
Gets the wrapup of this QueueConversationEventTopicChat.
:return: The wrapup of this QueueConversationEventTopicChat.
:rtype: QueueConversationEventTopicWrapup
"""
return self._wrapup
@wrapup.setter
def wrapup(self, wrapup):
"""
Sets the wrapup of this QueueConversationEventTopicChat.
:param wrapup: The wrapup of this QueueConversationEventTopicChat.
:type: QueueConversationEventTopicWrapup
"""
self._wrapup = wrapup
@property
def after_call_work(self):
"""
Gets the after_call_work of this QueueConversationEventTopicChat.
:return: The after_call_work of this QueueConversationEventTopicChat.
:rtype: QueueConversationEventTopicAfterCallWork
"""
return self._after_call_work
@after_call_work.setter
def after_call_work(self, after_call_work):
"""
Sets the after_call_work of this QueueConversationEventTopicChat.
:param after_call_work: The after_call_work of this QueueConversationEventTopicChat.
:type: QueueConversationEventTopicAfterCallWork
"""
self._after_call_work = after_call_work
@property
def after_call_work_required(self):
"""
Gets the after_call_work_required of this QueueConversationEventTopicChat.
:return: The after_call_work_required of this QueueConversationEventTopicChat.
:rtype: bool
"""
return self._after_call_work_required
@after_call_work_required.setter
def after_call_work_required(self, after_call_work_required):
"""
Sets the after_call_work_required of this QueueConversationEventTopicChat.
:param after_call_work_required: The after_call_work_required of this QueueConversationEventTopicChat.
:type: bool
"""
self._after_call_work_required = after_call_work_required
@property
def additional_properties(self):
"""
Gets the additional_properties of this QueueConversationEventTopicChat.
:return: The additional_properties of this QueueConversationEventTopicChat.
:rtype: object
"""
return self._additional_properties
@additional_properties.setter
def additional_properties(self, additional_properties):
"""
Sets the additional_properties of this QueueConversationEventTopicChat.
:param additional_properties: The additional_properties of this QueueConversationEventTopicChat.
:type: object
"""
self._additional_properties = additional_properties
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.873874 | 258 | 0.626147 |
7941ae8187ce6780b57d172f93a1be15e139a494 | 2,159 | py | Python | helpers.py | espogabe/Textbook-Server | 6f6ee1640ffa7b25b33ad562eab9b5504362da63 | [
"MIT"
] | null | null | null | helpers.py | espogabe/Textbook-Server | 6f6ee1640ffa7b25b33ad562eab9b5504362da63 | [
"MIT"
] | null | null | null | helpers.py | espogabe/Textbook-Server | 6f6ee1640ffa7b25b33ad562eab9b5504362da63 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2016 Aaron Zhao
# Copyright (c) 2016 Gabriel Esposito
# See LICENSE for details.
"""
Helper functions for the server.
"""
import decimal
from math import acos, cos, sin
import ConfigParser
EARTH_R = 3959 # Miles
def point_near(lat, lng, rlat, rlng, r):
"""Returns true if the point for lat, lng is within r miles of rlat, rlng"""
    # Great-circle distance via the spherical law of cosines; the trig functions expect radians
    return acos(sin(lat) * sin(rlat) + cos(lat) * cos(rlat) * cos(lng - float(rlng))) * EARTH_R < r
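# Usage sketch (illustrative, not part of the original module): convert degrees to radians first,
# e.g. point_near(math.radians(34.05), math.radians(-118.24), rlat, rlng, 10).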
def dec_default(obj):
"""Default function for json.dumps() to allow serialization of Decimal() from pymysql."""
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
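# Usage sketch (illustrative): json.dumps(row, default=dec_default) lets query results that
# contain decimal.Decimal values from pymysql serialize as plain floats.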
def create_config():
"""
Creates the config for the server.
Example config:
[mysql]
user=textbooks
pass=textbooks
host=localhost
db=textbooks
[oAuth]
server_client_id=validtoken
android_client_id=validtoken
[http]
host=localhost
port=8080
"""
def check_number(prompt):
while True:
try:
input = int(raw_input(prompt))
return input
except ValueError:
print "Must be a number."
print "Config not present. Creating config."
config = ConfigParser.RawConfigParser()
config.add_section('http')
config.set('http', 'host', str(raw_input("Hostname: ")))
config.set('http', 'port', str(check_number("Port: ")))
config.add_section('oAuth')
config.set('oAuth', 'server_client_id', str(raw_input("oAuth Server Client ID: ")))
config.set('oAuth', 'android_client_id', str(raw_input("oAuth Android Client ID: ")))
config.add_section('mysql')
config.set('mysql', 'user', str(raw_input("MySQL User: ")))
config.set('mysql', 'pass', str(raw_input("MySQL Password: ")))
config.set('mysql', 'host', str(raw_input("MySQL Host: ")))
config.set('mysql', 'db', str(raw_input("MySQL Database: ")))
with open('server.cfg', 'wb') as configfile:
config.write(configfile)
print "Config created."
| 26.9875 | 99 | 0.630848 |
7941aef39f6521ba5f763ac285cd6b53b3a60403 | 2,797 | py | Python | src/visual_func/visualize.py | VincentGaoHJ/Nonnegative-Matrix-Factorization | c272bfd6de86b9e2c815f4e4e8629cc27073edd6 | [
"MIT"
] | 1 | 2021-01-29T14:43:58.000Z | 2021-01-29T14:43:58.000Z | src/visual_func/visualize.py | VincentGaoHJ/Nonnegative-Matrix-Factorization | c272bfd6de86b9e2c815f4e4e8629cc27073edd6 | [
"MIT"
] | null | null | null | src/visual_func/visualize.py | VincentGaoHJ/Nonnegative-Matrix-Factorization | c272bfd6de86b9e2c815f4e4e8629cc27073edd6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2019/3/18
@author: Haojun Gao
"""
import os
import heapq
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from src.config import (
VISUAL_TYPE)
def normalize(data):
for i in range(len(data)):
m = np.sum(data[i])
data[i] /= m
return data
def visualize(U, V_convert, loss_matrix, node, step):
target_names = ["dimension-1", "dimension-2", "dimension-3"]
feature_names = ["class-1", "class-2", "class-3"]
figure_names = ["Loss of Matrix U", "Loss of Matrix V", "Loss of Matrix X", "Loss of Over-all"]
label_names = ["Matrix U", "Matrix V", "Matrix X", "Over-all"]
X_U = U.toarray()
X_V = V_convert.toarray()
X_U = normalize(X_U)
X_V = normalize(X_V)
if VISUAL_TYPE == 0:
pca = PCA(n_components=3)
X_U = pca.fit_transform(X_U)
X_V = pca.fit_transform(X_V)
else:
X_U_reduce = np.nansum(X_U, 0)
X_V_reduce = np.nansum(X_V, 0)
X_U_red_sma = map(X_U_reduce.tolist().index, heapq.nsmallest(len(X_U_reduce) - 3, X_U_reduce))
X_V_red_sma = map(X_V_reduce.tolist().index, heapq.nsmallest(len(X_V_reduce) - 3, X_V_reduce))
X_U_red_sma = list(X_U_red_sma)
X_V_red_sma = list(X_V_red_sma)
X_U = np.delete(X_U, X_U_red_sma, axis=1)
X_V = np.delete(X_V, X_V_red_sma, axis=1)
y_U = np.zeros(len(X_U))
y_V = np.zeros(len(X_V))
for i in range(len(X_U)):
y_U[i] = np.argmax(X_U[i])
for i in range(len(X_V)):
y_V[i] = np.argmax(X_V[i])
fig = plt.figure(figsize=(12, 10))
for k in range(2):
ax = fig.add_subplot(221 + k, projection='3d')
for c, i, target_name in zip('rgb', [0, 1, 2], target_names):
if k == 0:
ax.scatter(X_U[y_U == i, 0], X_U[y_U == i, 1], X_U[y_U == i, 2], c=c, label=target_name)
ax.set_title("Matrix-U")
else:
ax.scatter(X_V[y_V == i, 0], X_V[y_V == i, 1], X_V[y_V == i, 2], c=c, label=target_name)
ax.set_title("Matrix-V")
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[1])
ax.set_zlabel(feature_names[2])
ax.view_init(55, 60)
plt.legend()
value_x = np.linspace(0, len(loss_matrix), len(loss_matrix))
for i, color in enumerate("rgby"):
plt.subplot(425 + i)
plt.plot(value_x, loss_matrix[:, i], color + "--", linewidth=1, label=label_names[i])
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title(figure_names[i])
plt.legend()
file_path = os.path.join(node.image_dir, str(step + 1) + ".png")
plt.savefig(file_path)
plt.show()
| 30.402174 | 104 | 0.592778 |
7941afeb1814bce5c95d4ac3fc25de0d4439bb66 | 3,030 | py | Python | samples/03.welcome-user/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | samples/03.welcome-user/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | samples/03.welcome-user/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import asyncio
import sys
from datetime import datetime
from flask import Flask, request, Response
from botbuilder.core import (
BotFrameworkAdapter,
BotFrameworkAdapterSettings,
MemoryStorage,
TurnContext,
UserState,
)
from botbuilder.schema import Activity, ActivityTypes
from bots import WelcomeUserBot
# Create the loop and Flask app
LOOP = asyncio.get_event_loop()
APP = Flask(__name__, instance_relative_config=True)
APP.config.from_object("config.DefaultConfig")
# Create adapter.
# See https://aka.ms/about-bot-adapter to learn more about how bots work.
SETTINGS = BotFrameworkAdapterSettings(APP.config["APP_ID"], APP.config["APP_PASSWORD"])
ADAPTER = BotFrameworkAdapter(SETTINGS)
# Catch-all for errors.
async def on_error(context: TurnContext, error: Exception):
# This check writes out errors to console log .vs. app insights.
# NOTE: In production environment, you should consider logging this to Azure
# application insights.
print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)
# Send a message to the user
await context.send_activity("The bot encountered an error or bug.")
await context.send_activity(
"To continue to run this bot, please fix the bot source code."
)
# Send a trace activity if we're talking to the Bot Framework Emulator
if context.activity.channel_id == "emulator":
# Create a trace activity that contains the error object
trace_activity = Activity(
label="TurnError",
name="on_turn_error Trace",
timestamp=datetime.utcnow(),
type=ActivityTypes.trace,
value=f"{error}",
value_type="https://www.botframework.com/schemas/error",
)
# Send a trace activity, which will be displayed in Bot Framework Emulator
await context.send_activity(trace_activity)
ADAPTER.on_turn_error = on_error
# Create MemoryStorage, UserState
MEMORY = MemoryStorage()
USER_STATE = UserState(MEMORY)
# Create the Bot
BOT = WelcomeUserBot(USER_STATE)
# Listen for incoming requests on /api/messages.
@APP.route("/api/messages", methods=["POST"])
def messages():
# Main bot message handler.
if "application/json" in request.headers["Content-Type"]:
body = request.json
else:
return Response(status=415)
activity = Activity().deserialize(body)
auth_header = (
request.headers["Authorization"] if "Authorization" in request.headers else ""
)
try:
task = LOOP.create_task(
ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
)
LOOP.run_until_complete(task)
return Response(status=201)
except Exception as exception:
raise exception
if __name__ == "__main__":
try:
APP.run(debug=False, port=APP.config["PORT"]) # nosec debug
except Exception as exception:
raise exception
| 31.5625 | 88 | 0.70099 |
7941aff7295eb87aa9c2575e9780b8d422b98de8 | 739 | py | Python | broken/qt5.py | FnControlOption/winbrew | 0fcf0d46fa22c48c6d92115b17234650dcb9ff0f | [
"MIT"
] | 19 | 2015-03-18T01:11:20.000Z | 2021-08-01T18:51:54.000Z | broken/qt5.py | FnControlOption/winbrew | 0fcf0d46fa22c48c6d92115b17234650dcb9ff0f | [
"MIT"
] | 11 | 2015-12-17T02:13:10.000Z | 2017-07-21T14:15:13.000Z | broken/qt5.py | FnControlOption/winbrew | 0fcf0d46fa22c48c6d92115b17234650dcb9ff0f | [
"MIT"
] | 6 | 2015-05-14T19:50:15.000Z | 2020-11-22T08:55:03.000Z | import winbrew
import os
class Qt5(winbrew.Formula):
url = 'https://github.com/qt/qt5.git#v5.9.0-alpha1'
homepage = 'https://www.qt.io'
sha1 = '020954eb0240fa18e488afb8adc3e948b0e08907'
build_deps = ()
deps = ()
def build(self):
os.environ.update({
'QTMAKESPEC': 'win32-msvc2017',
'PATH': os.pathsep.join((
'{0}\\qt5\\qtbase\\bin'.format(os.getcwd()),
'{0}\\qt5\\gnuwin32\\bin'.format(os.getcwd()),
'{0}\\qt5\\qtrepotools\\bin'.format(os.getcwd()),
os.environ['PATH'],
)),
})
self.system('configure.bat -nomake examples -opensource')
self.nmake()
def test(self):
pass
| 28.423077 | 65 | 0.533153 |
7941b069c00c95d2043ebc2cb546b592fe695e1a | 4,552 | py | Python | otcextensions/sdk/css/v1/flavor.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/css/v1/flavor.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/css/v1/flavor.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
class Flavor(resource.Resource):
base_path = '/flavors'
resources_key = 'flavors'
allow_list = True
# Properties
#: Version. *Type: str*
version = resource.Body('version')
#: cpu count. *Type: int*
vcpus = resource.Body('cpu', type=int)
#: ram. *Type: int*
ram = resource.Body('ram', type=int)
#: region. *Type: str*
region = resource.Body('region')
#: Disk capacity range (min,max). *Type: str*
disk_range = resource.Body('diskrange', type=str)
#: ID of a flavor. *Type: ID*
flavor_id = resource.Body('flavor_id', alternate_id=True)
@classmethod
def list(cls, session, paginated=True, base_path=None, **params):
"""This method is a generator which yields resource objects.
This resource object list generator handles pagination and takes query
params for response filtering.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param bool paginated: ``True`` if a GET to this resource returns
a paginated series of responses, or ``False``
if a GET returns only one page of data.
**When paginated is False only one
page of data will be returned regardless
of the API's support of pagination.**
:param str base_path: Base part of the URI for listing resources, if
different from
:data:`~openstack.resource.Resource.base_path`.
:param dict params: These keyword arguments are passed through the
            :meth:`~openstack.resource.QueryParameters._transpose` method
to find if any of them match expected query parameters to be
sent in the *params* argument to
:meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally
checked against the
:data:`~openstack.resource.Resource.base_path` format string
to see if any path fragments need to be filled in by the contents
of this argument.
:return: A generator of :class:`Resource` objects.
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if
:data:`Resource.allow_list` is not set to ``True``.
:raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query
contains invalid params.
"""
if not cls.allow_list:
raise exceptions.MethodNotSupported(cls, "list")
session = cls._get_session(session)
microversion = cls._get_microversion_for_list(session)
if base_path is None:
base_path = cls.base_path
cls._query_mapping._validate(params, base_path=base_path)
query_params = cls._query_mapping._transpose(params)
uri = base_path % params
# Copy query_params due to weird mock unittest interactions
response = session.get(
uri,
headers={"Accept": "application/json"},
params=query_params.copy(),
microversion=microversion)
exceptions.raise_from_response(response)
data = response.json()
if 'versions' in data:
for ver in data['versions']:
version = ver['version']
resources = ver[cls.resources_key]
if not isinstance(resources, list):
resources = [resources]
for raw_resource in resources:
raw_resource.pop("self", None)
value = cls.existing(
microversion=microversion,
connection=session._get_connection(),
version=version,
**raw_resource)
yield value
else:
return
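# Minimal usage sketch (assumes an authenticated keystoneauth1 Adapter called ``session``;
# the names below are illustrative and not part of this module):
#
#     for flavor in Flavor.list(session, paginated=False):
#         print(flavor.flavor_id, flavor.vcpus, flavor.ram)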
| 41.381818 | 78 | 0.610062 |
7941b069e2bf0ad401c5db5604fb1b188fde0f7f | 398 | py | Python | env/Lib/site-packages/plotly/validators/scattergl/_x.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/scattergl/_x.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/scattergl/_x.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="scattergl", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs
)
| 33.166667 | 75 | 0.673367 |
7941b0a375a4067595a0680b647b7b6f9299482c | 11,049 | py | Python | examples/encoding/keyframe_archive_encoding.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | examples/encoding/keyframe_archive_encoding.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | examples/encoding/keyframe_archive_encoding.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, StreamInput, SelectionMode, Stream, EncodingOutput, \
ACLEntry, ACLPermission, MuxingStream, CloudRegion, ProgressiveMOVMuxing, MJPEGCodecConfiguration, \
H264CodecConfiguration, H264Profile, FMP4Muxing, AACCodecConfiguration, DashManifest, Period, VideoAdaptationSet, \
AudioAdaptationSet, FMP4Representation, FMP4RepresentationType
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT YOUR API KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = '/output/bitmovin_python/{}/'.format(date_component)
bitmovin = Bitmovin(api_key=API_KEY)
encoding_profiles = [
dict(name='180p_300kbit', height=180, bitrate=300 * 1000, fps=None),
dict(name='270p_500kbit', height=270, bitrate=500 * 1000, fps=None),
dict(name='360p_800kbit', height=360, bitrate=800 * 1000, fps=None),
dict(name='480p_1500kbit', height=480, bitrate=1500 * 1000, fps=None),
dict(name='720p_3000kbit', height=720, bitrate=3000 * 1000, fps=None),
dict(name='1080p_6000kbit', height=1080, bitrate=6000 * 1000, fps=None)
]
fmp4_muxings = []
def create_fmp4_muxings(encoding_id,
video_input_stream,
s3_output_id,
audio_input_stream=None):
if audio_input_stream is not None:
aac_codec_config = AACCodecConfiguration(
'AAC Audio Config',
bitrate=128000,
rate=48000
)
aac_codec_config = bitmovin.codecConfigurations.AAC.create(aac_codec_config).resource
audio_stream = Stream(
codec_configuration_id=aac_codec_config.id,
input_streams=[audio_input_stream],
name='Audio Stream'
)
audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
encoding_id=encoding_id).resource
audio_muxing_stream = MuxingStream(audio_stream.id)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
audio_fmp4_muxing_output = EncodingOutput(output_id=s3_output_id,
output_path=OUTPUT_BASE_PATH + 'audio',
acl=[acl_entry])
audio_fmp4_muxing = FMP4Muxing(streams=[audio_muxing_stream],
segment_length=4,
outputs=[audio_fmp4_muxing_output],
name='Audio Muxing')
audio_fmp4_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=audio_fmp4_muxing,
encoding_id=encoding_id).resource
fmp4_muxings.append(dict(type='audio',
muxing=audio_fmp4_muxing,
segment_path='audio'))
for encoding_profile in encoding_profiles:
h264_codec_config = H264CodecConfiguration(
name='{} H264 Codec Config'.format(encoding_profile.get('name')),
bitrate=encoding_profile.get('bitrate'),
height=encoding_profile.get('height'),
rate=encoding_profile.get('fps'),
profile=H264Profile.MAIN
)
h264_codec_config = bitmovin.codecConfigurations.H264.create(h264_codec_config).resource
video_stream = Stream(
codec_configuration_id=h264_codec_config.id,
input_streams=[video_input_stream],
name='{} Stream'.format(encoding_profile.get('name'))
)
video_stream = bitmovin.encodings.Stream.create(object_=video_stream,
encoding_id=encoding_id).resource
video_muxing_stream = MuxingStream(video_stream.id)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
fmp4_muxing_output = EncodingOutput(
output_id=s3_output_id,
output_path=OUTPUT_BASE_PATH + 'video/{}'.format(encoding_profile.get('name')),
acl=[acl_entry]
)
fmp4_muxing = FMP4Muxing(streams=[video_muxing_stream],
segment_length=4,
outputs=[fmp4_muxing_output],
name='{} Muxing'.format(encoding_profile.get('name')))
fmp4_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=fmp4_muxing,
encoding_id=encoding_id).resource
fmp4_muxings.append(dict(type='video',
muxing=fmp4_muxing,
segment_path='video/{}'.format(encoding_profile.get('name'))))
return
def create_dash_manifest(encoding_id, s3_output_id):
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
manifest_output = EncodingOutput(output_id=s3_output_id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
dash_manifest = DashManifest(manifest_name='myManifest.mpd',
outputs=[manifest_output],
name='Sample DASH Manifest')
dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
period = Period()
period = bitmovin.manifests.DASH.add_period(object_=period,
manifest_id=dash_manifest.id).resource
video_adaptation_set = VideoAdaptationSet()
video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
audio_adaptation_set = AudioAdaptationSet(lang='en')
audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
for fmp4_muxing in fmp4_muxings:
fmp4_representation = FMP4Representation(FMP4RepresentationType.TEMPLATE,
encoding_id=encoding_id,
muxing_id=fmp4_muxing.get('muxing').id,
segment_path=fmp4_muxing.get('segment_path'))
if fmp4_muxing.get('type') == 'audio':
adapation_set_id = audio_adaptation_set.id
else:
adapation_set_id = video_adaptation_set.id
fmp4_representation = bitmovin.manifests.DASH.add_fmp4_representation(
object_=fmp4_representation,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=adapation_set_id
).resource
return dash_manifest
def main():
https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
https_input = bitmovin.inputs.HTTPS.create(https_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='example mov encoding',
cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1,
encoder_version='BETA')
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
create_fmp4_muxings(encoding_id=encoding.id,
video_input_stream=video_input_stream,
audio_input_stream=audio_input_stream,
s3_output_id=s3_output.id)
mjpeg_codec_config = MJPEGCodecConfiguration(name='mjpeg codec configuration',
q_scale=2,
rate=1.0)
mjpeg_codec_config = bitmovin.codecConfigurations.MJPEG.create(mjpeg_codec_config).resource
video_stream = Stream(codec_configuration_id=mjpeg_codec_config.id,
input_streams=[video_input_stream],
name='Sample Stream 1080p')
video_stream = bitmovin.encodings.Stream.create(object_=video_stream,
encoding_id=encoding.id).resource
video_muxing_stream = MuxingStream(video_stream.id)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
mov_muxing_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
mov_muxing = ProgressiveMOVMuxing(streams=[video_muxing_stream],
filename='myKeyframeArchive.mov',
outputs=[mov_muxing_output],
name='Sample Progressive MOV Muxing',
description='This is a Progressive MOV muxing')
mov_muxing = bitmovin.encodings.Muxing.ProgressiveMOV.create(object_=mov_muxing,
encoding_id=encoding.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
dash_manifest = create_dash_manifest(encoding_id=encoding.id, s3_output_id=s3_output.id)
bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
try:
bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
main()
| 46.0375 | 119 | 0.603403 |
7941b130d7dc819b5a50c46b65945c030617b3b2 | 3,634 | py | Python | mother_adp.py | dimelab-public/MTConnect-Testbed-Simulator-Public | 4b42052953d042418ddecbd5ed8608ccbdbaa189 | [
"MIT"
] | null | null | null | mother_adp.py | dimelab-public/MTConnect-Testbed-Simulator-Public | 4b42052953d042418ddecbd5ed8608ccbdbaa189 | [
"MIT"
] | null | null | null | mother_adp.py | dimelab-public/MTConnect-Testbed-Simulator-Public | 4b42052953d042418ddecbd5ed8608ccbdbaa189 | [
"MIT"
] | 1 | 2019-12-09T14:52:18.000Z | 2019-12-09T14:52:18.000Z | # -*- coding: utf-8 -*-
import pandas as pd
import random
from math import floor
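# This script builds a simulated MTConnect test bed: for every organization listed in
# dbs/master.csv it generates one adapter script per machine plus start/kill batch files,
# and seeds MongoDB with a random production order for each machine.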
master = pd.read_csv('dbs/master.csv')  # forward slash avoids accidental escape sequences in the path
#////////////////////////////////////////////////""
file = open("parameters.txt","r+")
parameters=dict()
lines = file.readlines()
for i in lines:
temp=i.split("=")
    parameters[temp[0]] = temp[1].strip()  # strip the trailing newline so values such as MongoPort stay usable
file.close()
Dburl ="mongodb://localhost:"+str(parameters["MongoPort"])
######################################
import pymongo
myclient = pymongo.MongoClient(Dburl)
mydb = myclient["mtconnectdatabase"]
mycol = mydb["orders"]
orders = []
orderType=['A','B','C','D','E','E2','F','F4','GZ','G']
#////////////////////////////////////////////////""
orgs = list(master['names'])
machs = list(master['machines'])
cu = list(master['CapacityUtil'])
ports =list(master['agentPorts'])
batchNumbers=random.sample(range(20000,100000),(sum(machs)))
indexBatch=0
for i in range(len(orgs)):
path='temp_folder/'+str(orgs[i])+'/adapters/'
machines=machs[i]
capacity=cu[i]
Aport=ports[i]
ON=floor((capacity*machines)/100)
count=1
pathOrg='temp_folder/'+str(orgs[i])+'/'
batch = open(pathOrg+'batchStart.bat','w+')
kill = open(pathOrg+'batchKill.bat','w+')
for m in range(machines):
#//////
t=random.sample(orderType,1)
name='Mach'+str(m+1)
MID=str(i+1)+name
order= { "_id": MID, "QTY": random.randint(100,4000), "PartID": t[0],"Order":batchNumbers[indexBatch]}
orders.append(order)
#/////
mId=str(m+1)
fpath=path+'Mach'+mId+'.py'
f = open(fpath,'w')
port= Aport+m+1
if count<=ON:
mState = 'ON'
count+=1
else:
mState='OFF'
count+=1
indexBatch+=1
weed='{ "_id": M }'
f.write(f'''import socket
import time
import random
HOST = '127.0.0.1'
PORT = {port}
import pymongo
myclient = pymongo.MongoClient("{Dburl}")
mydb = myclient["mtconnectdatabase"]
mycol = mydb["orders"]
M="{MID}"
myquery = {weed}
while True:
mydoc = mycol.find(myquery)
for x in mydoc:
order= ((x['Order']))
QTY=((x['QTY']))
PartID=((x['PartID']))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
data=random.randint(50,95)
breakdown=random.paretovariate(1)
bd=0
c=0
if isinstance(order,str):
ran = "{name}|OEE|"+"UNAVAILABLE"+"|power|UNAVAILABLE"+"|Idle|Empty|UNAVAILABLE"
if isinstance(order,int):
if('{mState}'=='OFF'):
data=0
if('{mState}'!='OFF') and (breakdown>=2) and (breakdown<=500):
ran = "{name}|OEE|"+str(data)+"|power|Cutting|"+str(order)+"|"+str(QTY)+"|"+PartID
c=1
if(c==0):
ran = "{name}|OEE|"+str(data)+"|power|{mState}|"+str(order)+"|"+str(QTY)+"|"+PartID
if breakdown>500:
ran = "{name}|OEE|"+"UNAVAILABLE"+"|power|BREAKDOWN|"+str(order)+"|"+str(QTY)+"|"+PartID
bd=1
time.sleep(1)
strin=bytes(ran,encoding='utf-8')
s.sendall(strin)
if(bd==1):
repairTime=random.randint(50,100)
while repairTime>=1:
time.sleep(1)
repairTime-=1
bd=0
''')
id="O"+str(i)+"M"+str(m)
batch.write(f'start /min "[{id}]" python adapters/Mach{mId}.py\n')
kill.write(f'taskkill /f /FI "WINDOWTITLE eq [{id}]\n')
batch.close()
kill.close()
f.close()
x = mycol.insert_many(orders)
| 27.740458 | 110 | 0.524216 |
7941b3bbc3287c5506969b4e7dd66c1e883f6e8b | 15,257 | py | Python | spinup/algos/pytorch/sac/sac.py | yangyujie-jack/spinningup | 794f6e824450ec3b20abac6e0b8405d4b065e7dd | [
"MIT"
] | null | null | null | spinup/algos/pytorch/sac/sac.py | yangyujie-jack/spinningup | 794f6e824450ec3b20abac6e0b8405d4b065e7dd | [
"MIT"
] | null | null | null | spinup/algos/pytorch/sac/sac.py | yangyujie-jack/spinningup | 794f6e824450ec3b20abac6e0b8405d4b065e7dd | [
"MIT"
] | null | null | null | from copy import deepcopy
import itertools
import numpy as np
import torch
from torch.optim import Adam
import gym
# import time
import spinup.algos.pytorch.sac.core as core
# from spinup.utils.logx import EpochLogger
from spinup.utils.logger import Logger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for SAC agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(obs=self.obs_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs])
return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()}
def sac(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=1e-3, alpha=0.2, batch_size=100, start_steps=10000,
update_after=1000, update_every=50, num_test_episodes=10, max_ep_len=1000,
logger_kwargs=dict(), save_freq=1):
"""
Soft Actor-Critic (SAC)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The constructor method for a PyTorch Module with an ``act``
method, a ``pi`` module, a ``q1`` module, and a ``q2`` module.
The ``act`` method and ``pi`` module should accept batches of
observations as inputs, and ``q1`` and ``q2`` should accept a batch
of observations and a batch of actions as inputs. When called,
``act``, ``q1``, and ``q2`` should return:
=========== ================ ======================================
Call Output Shape Description
=========== ================ ======================================
``act`` (batch, act_dim) | Numpy array of actions for each
| observation.
``q1`` (batch,) | Tensor containing one current estimate
| of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
``q2`` (batch,) | Tensor containing the other current
| estimate of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
=========== ================ ======================================
Calling ``pi`` should return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``a`` (batch, act_dim) | Tensor containing actions from policy
| given observations.
``logp_pi`` (batch,) | Tensor containing log probabilities of
| actions in ``a``. Importantly: gradients
| should be able to flow back into ``a``.
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
# logger = EpochLogger(**logger_kwargs)
# logger.save_config(locals())
logger = Logger()
torch.manual_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
# act_limit = env.action_space.high[0]
# Create actor-critic module and target networks
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
ac_targ = deepcopy(ac)
# Freeze target networks with respect to optimizers (only update via polyak averaging)
for p in ac_targ.parameters():
p.requires_grad = False
# List of parameters for both Q-networks (save this for convenience)
q_params = itertools.chain(ac.q1.parameters(), ac.q2.parameters())
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables (protip: try to get a feel for how different size networks behave!)
# var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q1, ac.q2])
# logger.log('\nNumber of parameters: \t pi: %d, \t q1: %d, \t q2: %d\n'%var_counts)
# Set up function for computing SAC Q-losses
def compute_loss_q(data):
o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
q1 = ac.q1(o,a)
q2 = ac.q2(o,a)
# Bellman backup for Q functions
with torch.no_grad():
# Target actions come from *current* policy
a2, logp_a2 = ac.pi(o2)
# Target Q-values
q1_pi_targ = ac_targ.q1(o2, a2)
q2_pi_targ = ac_targ.q2(o2, a2)
q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ)
backup = r + gamma * (1 - d) * (q_pi_targ - alpha * logp_a2)
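            # Entropy-regularized target: y = r + gamma*(1-d)*(min_i Q_targ_i(s',a') - alpha*log pi(a'|s'))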
# MSE loss against Bellman backup
loss_q1 = ((q1 - backup)**2).mean()
loss_q2 = ((q2 - backup)**2).mean()
loss_q = loss_q1 + loss_q2
# Useful info for logging
q_info = dict(Q1=q1.mean().item(), Q2=q2.mean().item())
return loss_q, q_info
# Set up function for computing SAC pi loss
def compute_loss_pi(data):
o = data['obs']
pi, logp_pi = ac.pi(o)
q1_pi = ac.q1(o, pi)
q2_pi = ac.q2(o, pi)
q_pi = torch.min(q1_pi, q2_pi)
# Entropy-regularized policy loss
loss_pi = (alpha * logp_pi - q_pi).mean()
# Useful info for logging
pi_info = dict(Entropy=-logp_pi.mean().item())
return loss_pi, pi_info
# Set up optimizers for policy and q-function
pi_optimizer = Adam(ac.pi.parameters(), lr=lr)
q_optimizer = Adam(q_params, lr=lr)
# Set up model saving
# logger.setup_pytorch_saver(ac)
def update(data):
# First run one gradient descent step for Q1 and Q2
q_optimizer.zero_grad()
loss_q, q_info = compute_loss_q(data)
loss_q.backward()
q_optimizer.step()
# Record things
# logger.store(LossQ=loss_q.item(), **q_info)
# Freeze Q-networks so you don't waste computational effort
# computing gradients for them during the policy learning step.
for p in q_params:
p.requires_grad = False
# Next run one gradient descent step for pi.
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
loss_pi.backward()
pi_optimizer.step()
# Unfreeze Q-networks so you can optimize it at next DDPG step.
for p in q_params:
p.requires_grad = True
# Record things
# logger.store(LossPi=loss_pi.item(), **pi_info)
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(polyak)
p_targ.data.add_((1 - polyak) * p.data)
logger.add_scalar(QLoss=loss_q, PolicyLoss=loss_pi, **q_info, **pi_info, tag='algorithm')
def get_action(o, deterministic=False):
return ac.act(torch.as_tensor(o, dtype=torch.float32),
deterministic)
def test_agent():
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
# logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
# Prepare for interaction with environment
total_steps = steps_per_epoch * epochs
# start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy.
if t > start_steps:
a = get_action(o)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len == max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if d or (ep_len == max_ep_len):
# logger.store(EpRet=ep_ret, EpLen=ep_len)
logger.add_scalar(EpRet=ep_ret, EpLen=ep_len, tag='env')
o, ep_ret, ep_len = env.reset(), 0, 0
# Update handling
if t >= update_after and t % update_every == 0:
for j in range(update_every):
batch = replay_buffer.sample_batch(batch_size)
update(data=batch)
# End of epoch handling
# if (t+1) % steps_per_epoch == 0:
# epoch = (t+1) // steps_per_epoch
#
# # Save model
# # if (epoch % save_freq == 0) or (epoch == epochs):
# # logger.save_state({'env': env}, None)
#
# # Test the performance of the deterministic version of the agent.
# # test_agent()
#
# # Log info about epoch
# # logger.log_tabular('Epoch', epoch)
# # logger.log_tabular('EpRet', with_min_and_max=True)
# # logger.log_tabular('TestEpRet', with_min_and_max=True)
# # logger.log_tabular('EpLen', average_only=True)
# # logger.log_tabular('TestEpLen', average_only=True)
# # logger.log_tabular('TotalEnvInteracts', t)
# # logger.log_tabular('Q1Vals', with_min_and_max=True)
# # logger.log_tabular('Q2Vals', with_min_and_max=True)
# # logger.log_tabular('LogPi', with_min_and_max=True)
# # logger.log_tabular('LossPi', average_only=True)
# # logger.log_tabular('LossQ', average_only=True)
# # logger.log_tabular('Time', time.time()-start_time)
# # logger.dump_tabular()
if (t + 1) % 100 == 0:
print(f'env step {t + 1}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='sac')
args = parser.parse_args()
# from spinup.utils.run_utils import setup_logger_kwargs
# logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
torch.set_num_threads(torch.get_num_threads())
sac(lambda: gym.make(args.env), actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
gamma=args.gamma, seed=args.seed, epochs=args.epochs, max_ep_len=200)
| 40.685333 | 97 | 0.577178 |
7941b47491b8a6794f599df8e11505978523a136 | 162 | py | Python | src/domain/use_cases/__init__.py | panda-coder/py-clean-flask | e7b8af5056178cd1dc6161f52a909f8043dc4b66 | [
"MIT"
] | null | null | null | src/domain/use_cases/__init__.py | panda-coder/py-clean-flask | e7b8af5056178cd1dc6161f52a909f8043dc4b66 | [
"MIT"
] | null | null | null | src/domain/use_cases/__init__.py | panda-coder/py-clean-flask | e7b8af5056178cd1dc6161f52a909f8043dc4b66 | [
"MIT"
] | null | null | null | __all__ = ['Sum', 'Subtract', 'Multiply', 'Divide']
from .sum import Sum
from .subtract import Subtract
from .multiply import Multiply
from .divide import Divide | 27 | 51 | 0.753086 |
7941b4849565fedc8bcf58146b78f4b338d978d9 | 693 | py | Python | src/opencv/17_smoothing-blurring-images.py | prakashdale/prakashdale.github.io | 7e23a4f5d8c9c64bcc910ef6e0b7337a65feeff2 | [
"MIT"
] | null | null | null | src/opencv/17_smoothing-blurring-images.py | prakashdale/prakashdale.github.io | 7e23a4f5d8c9c64bcc910ef6e0b7337a65feeff2 | [
"MIT"
] | null | null | null | src/opencv/17_smoothing-blurring-images.py | prakashdale/prakashdale.github.io | 7e23a4f5d8c9c64bcc910ef6e0b7337a65feeff2 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('saltandpeppernoise.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
kernel = np.ones((5, 5), np.float32) / 25  # averaging kernel; np.float32 replaces the removed np.float alias
dst = cv2.filter2D(img, -1, kernel)  # homogeneous (averaging) filter
blur = cv2.blur(img, (5, 5))
gblur = cv2.GaussianBlur(img, (5,5), 0)
median = cv2.medianBlur(img, 5)
bilateral = cv2.bilateralFilter(img, 9, 75, 75)
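# Rough intuition (added note, not from the original tutorial): the averaging and Gaussian filters
# soften salt-and-pepper speckles along with edges, the median filter usually removes this noise
# best, and the bilateral filter smooths while keeping edges sharp.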
titles = ['image', '2D Convolution', 'Blur', 'GBlur', 'median', 'bilateral']
images = [img, dst, blur, gblur, median, bilateral]
for i in range(len(images)):
plt.subplot(2, 3, i+1)
plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
| 26.653846 | 76 | 0.66811 |
7941b486af3e6619ab735083b37ae8a270435969 | 4,324 | py | Python | clinicaltrials/frontend/custom_rest_views.py | chadmiller/clinicaltrials-act-tracker | d16f5ff7b1fde673e7b00cd674666a19b19bf092 | [
"MIT"
] | null | null | null | clinicaltrials/frontend/custom_rest_views.py | chadmiller/clinicaltrials-act-tracker | d16f5ff7b1fde673e7b00cd674666a19b19bf092 | [
"MIT"
] | null | null | null | clinicaltrials/frontend/custom_rest_views.py | chadmiller/clinicaltrials-act-tracker | d16f5ff7b1fde673e7b00cd674666a19b19bf092 | [
"MIT"
] | null | null | null | """Custom django-rest-framework views to support API representation of
our data.
The names of the ordering and search fields are coupled to names
expected to be used by the DataTables javascript library (see
`site.js`).
See also custom_rest_backends.py
"""
from django.db.models import Count
from django.db.models import Q
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework.urlpatterns import format_suffix_patterns
from .custom_filters import RankingFilter
from .custom_filters import TrialStatusFilter
from .custom_filters import SponsorFilter
from frontend.models import Ranking
from frontend.models import Sponsor
from frontend.models import Trial
class IsIndustrySponsorField(serializers.RelatedField):
def to_representation(self, value):
return value.is_industry_sponsor
# Serializers define the API representation.
class RankingSerializer(serializers.HyperlinkedModelSerializer):
sponsor_name = serializers.StringRelatedField(source='sponsor')
sponsor_slug = serializers.SlugRelatedField(
source='sponsor', read_only=True, slug_field='slug')
is_industry_sponsor = IsIndustrySponsorField(read_only=True, source='sponsor')
class Meta:
model = Ranking
fields = ('date', 'rank', 'due', 'reported', 'total', 'percentage',
'sponsor_name', 'is_industry_sponsor', 'sponsor_slug')
class TrialSerializer(serializers.HyperlinkedModelSerializer):
sponsor_name = serializers.StringRelatedField(source='sponsor')
sponsor_slug = serializers.SlugRelatedField(
source='sponsor', read_only=True, slug_field='slug')
class Meta:
model = Trial
fields = ('registry_id', 'publication_url', 'title', 'has_exemption',
'start_date', 'completion_date', 'has_results', 'results_due',
'sponsor_name', 'sponsor_slug', 'status', 'is_pact', 'days_late',)
class SponsorSerializer(serializers.HyperlinkedModelSerializer):
num_trials = serializers.IntegerField()
class Meta:
model = Sponsor
fields = ('slug', 'name', 'is_industry_sponsor', 'updated_date', 'num_trials')
class CSVNonPagingViewSet(viewsets.ModelViewSet):
"""A viewset that allows downloading a CSV in its entirety, rather
than in pages.
"""
@property
def paginator(self):
"""Overrides paginator lookup in base class
"""
if getattr(self, '_skip_paginator', False):
p = None
else:
p = super(CSVNonPagingViewSet, self).paginator
return p
def list(self, request, format=None):
"""Overrides method in base class
"""
if request.accepted_renderer.media_type == 'text/csv':
self._skip_paginator = True
result = super(CSVNonPagingViewSet, self).list(request, format=format)
self._skip_paginator = False
else:
result = super(CSVNonPagingViewSet, self).list(request, format=format)
return result
class RankingViewSet(CSVNonPagingViewSet):
queryset = Ranking.objects.select_related('sponsor')
serializer_class = RankingSerializer
ordering_fields = ['sponsor__name', 'due', 'reported', 'percentage']
filter_class = RankingFilter
search_fields = ('sponsor__name',)
class TrialViewSet(CSVNonPagingViewSet):
serializer_class = TrialSerializer
ordering_fields = ['status', 'sponsor__name', 'registry_id',
'title', 'completion_date', 'days_late']
filter_class = TrialStatusFilter
search_fields = ('title', 'sponsor__name',)
def get_queryset(self):
"""By default, don't show Trials that are no longer ACTs.
The exception is when using the one filter that does is
interested in such Trials.
"""
if 'is_no_longer_overdue_today' in self.request.GET:
return Trial.objects.select_related('sponsor').all()
return Trial.objects.visible().select_related('sponsor').all()
class SponsorViewSet(CSVNonPagingViewSet):
queryset = Sponsor.objects.annotate(
num_trials=Count(
'trial',
filter=~Q(trial__status=Trial.STATUS_NO_LONGER_ACT)))
serializer_class = SponsorSerializer
filter_class = SponsorFilter
search_fields = ('name',)
| 33.261538 | 86 | 0.70074 |
7941b4c100de80eff5db3374f7eaefbb4f329a12 | 9,175 | py | Python | backbone/deeplabv3_cosnet/deeplabv3.py | lartpang/OpticalFlowBasedVOS | cd4856644b77dd963133797c543aeafb4f402217 | [
"MIT"
] | 6 | 2020-11-19T04:20:52.000Z | 2021-09-24T02:40:11.000Z | backbone/deeplabv3_cosnet/deeplabv3.py | lartpang/OpticalFlowBasedVOS | cd4856644b77dd963133797c543aeafb4f402217 | [
"MIT"
] | null | null | null | backbone/deeplabv3_cosnet/deeplabv3.py | lartpang/OpticalFlowBasedVOS | cd4856644b77dd963133797c543aeafb4f402217 | [
"MIT"
] | 2 | 2020-11-22T02:02:18.000Z | 2021-11-14T08:56:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 16 10:01:14 2018
@author: carri
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["Res_Deeplab"]
from backbone.utils import load_pretrained_params
model_urls = {"res_deeplabv3": "/home/lart/Datasets/PretainedWeighted/deeplab_davis_12_0.pth"}
affine_par = True
# Difference from siamese_model_concat: this uses the standard deeplab_v3 backbone network and then adds the asymmetric branches
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=stride, bias=False
) # change
self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
padding = dilation
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=1, # change
padding=padding,
bias=False,
dilation=dilation,
)
self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ASPP(nn.Module):
def __init__(self, dilation_series, padding_series, depth):
super(ASPP, self).__init__()
self.mean = nn.AdaptiveAvgPool2d((1, 1))
self.conv = nn.Conv2d(2048, depth, 1, 1)
self.bn_x = nn.BatchNorm2d(depth)
self.conv2d_0 = nn.Conv2d(2048, depth, kernel_size=1, stride=1)
self.bn_0 = nn.BatchNorm2d(depth)
self.conv2d_1 = nn.Conv2d(
2048,
depth,
kernel_size=3,
stride=1,
padding=padding_series[0],
dilation=dilation_series[0],
)
self.bn_1 = nn.BatchNorm2d(depth)
self.conv2d_2 = nn.Conv2d(
2048,
depth,
kernel_size=3,
stride=1,
padding=padding_series[1],
dilation=dilation_series[1],
)
self.bn_2 = nn.BatchNorm2d(depth)
self.conv2d_3 = nn.Conv2d(
2048,
depth,
kernel_size=3,
stride=1,
padding=padding_series[2],
dilation=dilation_series[2],
)
self.bn_3 = nn.BatchNorm2d(depth)
self.relu = nn.ReLU(inplace=True)
self.bottleneck = nn.Conv2d(depth * 5, 256, kernel_size=3, padding=1) # 512 1x1Conv
self.bn = nn.BatchNorm2d(256)
self.prelu = nn.PReLU()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_stage_(self, dilation1, padding1):
Conv = nn.Conv2d(
2048, 256, kernel_size=3, stride=1, padding=padding1, dilation=dilation1, bias=True
)
Bn = nn.BatchNorm2d(256)
Relu = nn.ReLU(inplace=True)
return nn.Sequential(Conv, Bn, Relu)
def forward(self, x):
size = x.shape[2:]
image_features = self.mean(x)
image_features = self.conv(image_features)
image_features = self.bn_x(image_features)
image_features = self.relu(image_features)
image_features = F.interpolate(
image_features, size=size, mode="bilinear", align_corners=True
)
out_0 = self.conv2d_0(x)
out_0 = self.bn_0(out_0)
out_0 = self.relu(out_0)
out_1 = self.conv2d_1(x)
out_1 = self.bn_1(out_1)
out_1 = self.relu(out_1)
out_2 = self.conv2d_2(x)
out_2 = self.bn_2(out_2)
out_2 = self.relu(out_2)
out_3 = self.conv2d_3(x)
out_3 = self.bn_3(out_3)
out_3 = self.relu(out_3)
out = torch.cat([image_features, out_0, out_1, out_2, out_3], 1)
out = self.bottleneck(out)
out = self.bn(out)
out = self.prelu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
self.layer5 = self._make_pred_layer(ASPP, [6, 12, 18], [6, 12, 18], 512)
# self.main_classifier = nn.Conv2d(256, num_classes, kernel_size=1)
# self.softmax = nn.Sigmoid() # nn.Softmax()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if (
stride != 1
or self.inplanes != planes * block.expansion
or dilation == 2
or dilation == 4
):
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion, affine=affine_par),
)
for i in downsample._modules["1"].parameters():
i.requires_grad = False
layers = []
layers.append(
block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def _make_pred_layer(self, block, dilation_series, padding_series, num_classes):
return block(dilation_series, padding_series, num_classes)
def forward(self, x):
# input_size = x.size()[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
print(x.size())
x = self.maxpool(x)
print(x.size())
x = self.layer1(x)
print(x.size())
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
# x = self.main_classifier(fea)
# x = F.upsample(
# x, input_size, mode="bilinear"
# ) # upsample to the size of input image, scale=8
# x = self.softmax(x)
return x
def Res_Deeplab(pretrained: bool = True) -> nn.Module:
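    # ResNet-101 style backbone (3-4-23-3 bottleneck blocks) topped with the
    # ASPP head; optionally loads the checkpoint referenced by model_urls.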
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
pretrained_dict = torch.load(model_urls["res_deeplabv3"])
load_pretrained_params(model, pretrained_dict["model"])
return model
if __name__ == "__main__":
m = Res_Deeplab(pretrained=True)
in_data = torch.rand((3, 3, 320, 320))
print(m(in_data).size())
| 32.306338 | 98 | 0.575259 |
7941b54ea6b62157415200f5c5f39f609e13e95e | 1,164 | py | Python | logger/__init__.py | 3lpsy/FactionAPI | e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022 | [
"BSD-3-Clause"
] | null | null | null | logger/__init__.py | 3lpsy/FactionAPI | e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022 | [
"BSD-3-Clause"
] | null | null | null | logger/__init__.py | 3lpsy/FactionAPI | e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
import logging
from os import environ
DEFAULT_LEVEL_STR = environ.get("DEFAULT_LOGGING_LEVEL", "INFO")
DEFAULT_LEVEL = getattr(logging, DEFAULT_LEVEL_STR.upper(), logging.INFO)  # fall back to the numeric INFO level
ROOT_LOGGER = logging.getLogger() # get root logger
if environ.get("GUNICORN_SERVER", "gunicorn") == "flask":
ROOT_LOGGER.setLevel(DEFAULT_LEVEL) # set default level if using the flask server
def log(source, message, level="info"):
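    # Format the message as "[source] - message"; route it through the root
    # logger when USE_NATIVE_LOGGER=1, otherwise print it with a timestamp.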
msg = f"[{str(source)}] - {str(message)}"
if int(environ.get("USE_NATIVE_LOGGER", 0)) == 1:
if not isinstance(level, int):
level = getattr(logging, level.upper(), DEFAULT_LEVEL)
logger = logging.getLogger() # get root logger
if len(logger.handlers) < 1: # if no handlers are passed in, setup default console handler
ch = logging.StreamHandler()
ch.setLevel(DEFAULT_LEVEL)
logger.addHandler(ch)
            logger.log(DEFAULT_LEVEL, "[logger] - Setting Up Default Handler at level 'INFO' Because No Handlers Were Found")
logger.log(level, msg)
else:
print("({0}){1}".format(datetime.now().strftime("%m/%d %H:%M:%S"), msg))
| 40.137931 | 124 | 0.666667 |
7941b5ac5fcb9bc47e2e434387655dc943744c03 | 4,552 | py | Python | parser/fase2/team21/Analisis_Ascendente/Instrucciones/Delete/delete.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team21/Analisis_Ascendente/Instrucciones/Delete/delete.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team21/Analisis_Ascendente/Instrucciones/Delete/delete.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | #from Instrucciones.instruccion import Instruccion
from tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.expresion import Id
from tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
#from storageManager.jsonMode import *
from tytus.parser.fase2.team21.Analisis_Ascendente.storageManager.jsonMode import *
#import Tabla_simbolos.TablaSimbolos as ts
import tytus.parser.fase2.team21.Analisis_Ascendente.Tabla_simbolos.TablaSimbolos as TS
#DELETE
class Delete(Instruccion):
def __init__(self,caso, id, where,concatena,fila,columna):
self.caso = caso
self.id = id
self.where = where
self.concatena = concatena
self.fila = fila
self.columna = columna
def ejecutar(deleteData, ts, consola, exceptions):
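        # Execute the DELETE: require a selected database, resolve the target
        # table, then truncate it (no WHERE clause) or delete the matching row.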
#insert('test','tbventa',[1,4,'2020-10-12',450,'False','Venta de bomba de agua para toyota'])
#insert('test','tbventa',[2,4,'2020-10-12',450,'False','Venta de bomba de agua para toyota'])
if ts.validar_sim("usedatabase1234") == 1:
            # name of the currently selected database
bdactual = ts.buscar_sim("usedatabase1234")
            # look up the database symbol to obtain its environment
BD = ts.buscar_sim(bdactual.valor)
entornoBD = BD.Entorno
print(deleteData.id," -> nombre tabla")
if entornoBD.validar_sim(deleteData.id) == 1:
simbolo_tabla = entornoBD.buscar_sim(deleteData.id)
if deleteData.where == None:
truncate(BD.id,simbolo_tabla.id)
else:
datoiz= deleteData.where.iz
datodr = deleteData.where.dr
operador = deleteData.where.operador
resultado = Expresion.Resolver(datodr,ts,consola,exceptions)
#consola.append(f" El resultado es: {str(resultado)}")
print("el nombre del campo es: ",datoiz.id)
if simbolo_tabla.Entorno.validar_sim(datoiz.id) == 1:
campos = simbolo_tabla.Entorno.simbolos
i =0
data = []
data.append(resultado)
if delete(BD.id,simbolo_tabla.id,data) == 0:
consola.append(f"Delete from {simbolo_tabla.id} exitosamente")
else:
consola.append(
f"22005-error_in_assignment no existe la llave en tabla {simbolo_tabla.id}-{deleteData.fila}-{deleteData.columna}")
exceptions.append(
f"Error semantico-22005-error_in_assignment no existe la columna en tabla {simbolo_tabla.id}-{deleteData.fila}-{deleteData.columna}")
else:
consola.append(
f"Error semantico-22005-error_in_assignment no existe la columna en tabla {simbolo_tabla.id}-{deleteData.fila}-{deleteData.columna}")
exceptions.append(
f"Error semantico-22005-error_in_assignment no existe la columna en tabla {simbolo_tabla.id}-{deleteData.fila}-{deleteData.columna}")
print("ejecuntando un delete")
else:
consola.append(f"42P01 undefined_table, no existe la tabla {deleteData.id}")
exceptions.append(f"Error semantico-42P01- 42P01 undefined_table, no existe la tabla {deleteData.id}-{deleteData.fila}-{deleteData.columna}")
else:
consola.append("22005 error_in_assignment, No se ha seleccionado una BD\n")
consola.append(
f"Error semantico-22005-error_in_assignment No se ha seleccionado DB-{deleteData.fila}-{deleteData.columna}")
exceptions.append(f"Error semantico-22005-error_in_assignment No se ha seleccionado DB-{deleteData.fila}-{deleteData.columna}")
print("ejecuntando un delete")
def traducir(instr, ts, consola, exceptions,tv):
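        # Translation phase: emit the generated intermediate code for this
        # statement to the console using fresh temporaries.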
info = ""
for data in instr.concatena:
info += " " + data
contador = tv.Temp()
consola.append(f"\n\t{contador} = \"{info}\"")
contador2 = tv.Temp()
consola.append(f"\n\t{contador2} = T({contador})")
consola.append(f"\n\tT1 = T3({contador2})")
consola.append(f"\n\tstack.append(T1)\n") | 44.627451 | 166 | 0.607645 |
7941b6e08a6b23944762115eda171337a24a71b2 | 4,146 | py | Python | pesummary/conf/configuration.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2021-08-03T05:58:20.000Z | 2021-08-03T05:58:20.000Z | pesummary/conf/configuration.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2020-06-13T13:29:35.000Z | 2020-06-15T12:45:04.000Z | pesummary/conf/configuration.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 3 | 2021-07-08T08:31:28.000Z | 2022-03-31T14:08:58.000Z | import numpy as np
import pkg_resources
import os
# matplotlib style file
_path = pkg_resources.resource_filename("pesummary", "conf")
style_file = os.path.join(_path, "matplotlib_rcparams.sty")
# checkpoint file
checkpoint_dir = lambda webdir: os.path.join(webdir, "checkpoint")
resume_file = "pesummary_resume.pickle"
# Overwrite message
overwrite = "Overwriting {} from {} to {}"
# The palette to be used to distinguish result files
palette = "colorblind"
# Include the prior on the posterior plots
include_prior = False
# The user that submitted the job
user = "albert.einstein"
# The number of samples to disregard as burnin
burnin = 0
# The method to use to remove the samples as burnin
burnin_method = "burnin_by_step_number"
# delimiter to use when saving files to dat with np.savetxt
delimiter = "\t"
# Minimum length of h5 dataset for compression. Compressing small datasets can
# lead to an increased file size
compression_min_length = 1
# Plot 1d kdes rather than 1d histograms
kde_plot = False
# color for non-comparison plots
color = 'b'
# color cycle for different mcmc chains
colorcycle = "brgkmc"
# color cycle for different cmaps
cmapcycle = ["YlOrBr", "Blues", "Purples", "Greens", "PuRd", "inferno"]
# color for injection lines
injection_color = 'orange'
# color for prior histograms
prior_color = 'k'
# Produce public facing summarypages
public = False
# Number of cores to run on
multi_process = 1
# Default f_low to use for GW specific conversions
default_flow = 20.0
# Default f_final to use for GW specific conversions
default_f_final = 1024.0
# Default delta_f to use for GW specific conversions
default_delta_f = 1. / 256
# Standard meta_data names
log_evidence = "ln_evidence"
evidence = "evidence"
log_evidence_error = "ln_evidence_error"
log_bayes_factor = "ln_bayes_factor"
bayes_factor = "bayes_factor"
log_noise_evidence = "ln_noise_evidence"
log_prior_volume = "ln_prior_volume"
# corner.corner colors
corner_colors = ['#0072C1', '#b30909', '#8809b3', '#b37a09']
# corner.corner default kwargs
corner_kwargs = dict(
bins=50, smooth=0.9, label_kwargs=dict(fontsize=16),
title_kwargs=dict(fontsize=16), color=corner_colors[0],
truth_color='tab:orange', quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
plot_density=False, plot_datapoints=True, fill_contours=True,
max_n_ticks=3
)
# Parameters to use for GW corner plot
gw_corner_parameters = [
"luminosity_distance", "dec", "a_2", "a_1", "geocent_time", "phi_jl",
"psi", "ra", "phase", "mass_2", "mass_1", "phi_12", "tilt_2", "iota",
"tilt_1", "chi_p", "chirp_mass", "mass_ratio", "symmetric_mass_ratio",
"total_mass", "chi_eff", "redshift", "mass_1_source", "mass_2_source",
"total_mass_source", "chirp_mass_source", "lambda_1", "lambda_2",
"delta_lambda", "lambda_tilde", "log_likelihood"
]
# Parameters to use for GW source frame corner plot
gw_source_frame_corner_parameters = [
"luminosity_distance", "mass_1_source", "mass_2_source",
"total_mass_source", "chirp_mass_source", "redshift"
]
# List of precessing angles
precessing_angles = [
"cos_tilt_1", "cos_tilt_2", "tilt_1", "tilt_2", "phi_12", "phi_jl"
]
# List of precessing spins
precessing_spins = ["spin_1x", "spin_1y", "spin_2x", "spin_2y"]
# Parameters to use for GW extrinsic corner plot
gw_extrinsic_corner_parameters = ["luminosity_distance", "psi", "ra", "dec"]
# Cosmology to use when calculating redshift
cosmology = "Planck15"
# Analytic PSD to use for conversions when no PSD file is provided
psd = "aLIGOZeroDetHighPower"
# GraceDB service url to use
gracedb_server = "https://gracedb.ligo.org/api/"
# Information required for reproducing a GW analysis
gw_reproducibility = ["config", "psd"]
# Additional 1d histogram pages that combine multiple GW marginalized posterior
# distributions
additional_1d_pages = {
"precession": ["chi_p", "chi_p_2spin", "network_precessing_snr", "beta"],
"subdominant_multipoles": [
"network_matched_filter_snr", "network_21_multipole_snr",
"network_33_multipole_snr", "network_44_multipole_snr"
]
}
| 29.827338 | 79 | 0.738061 |
7941b7c8976903eb727dc15e9dace5065fc0c15c | 2,350 | py | Python | tests/admin_views/customadmin.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | tests/admin_views/customadmin.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | tests/admin_views/customadmin.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http import HttpResponse
from . import admin as base_admin, forms, models
class Admin2(admin.AdminSite):
app_index_template = 'custom_admin/app_index.html'
login_form = forms.CustomAdminAuthenticationForm
login_template = 'custom_admin/login.html'
logout_template = 'custom_admin/logout.html'
index_template = ['custom_admin/index.html'] # a list, to test fix for #18697
password_change_template = 'custom_admin/password_change_form.html'
password_change_done_template = 'custom_admin/password_change_done.html'
# A custom index view.
def index(self, request, extra_context=None):
return super(Admin2, self).index(request, {'foo': '*bar*'})
def get_urls(self):
return [
url(r'^my_view/$', self.admin_view(self.my_view)),
] + super(Admin2, self).get_urls()
def my_view(self, request):
return HttpResponse("Django is a magical pony!")
def password_change(self, request, extra_context=None):
return super(Admin2, self).password_change(request, {'spam': 'eggs'})
class UserLimitedAdmin(UserAdmin):
# used for testing password change on a user not in queryset
def get_queryset(self, request):
qs = super(UserLimitedAdmin, self).get_queryset(request)
return qs.filter(is_superuser=False)
class CustomPwdTemplateUserAdmin(UserAdmin):
change_user_password_template = ['admin/auth/user/change_password.html'] # a list, to test fix for #18697
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
simple_site = Admin2(name='admin4')
simple_site.register(User, CustomPwdTemplateUserAdmin)
| 36.71875 | 110 | 0.760851 |
7941b83bf5073eef4526c8f62276f7acd2b96ef2 | 1,745 | py | Python | linear.py | dataubc/Modeling_IFT_for_heavy_oil_emulsion | e9a9e75fbf6434d755979c4b616801d60c8a617f | [
"MIT"
] | 1 | 2021-05-02T17:19:31.000Z | 2021-05-02T17:19:31.000Z | linear.py | dataubc/Modeling_IFT_for_heavy_oil_emulsion | e9a9e75fbf6434d755979c4b616801d60c8a617f | [
"MIT"
] | null | null | null | linear.py | dataubc/Modeling_IFT_for_heavy_oil_emulsion | e9a9e75fbf6434d755979c4b616801d60c8a617f | [
"MIT"
] | null | null | null | import pandas as pd
# scikit-learn imports
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# reg = Pipeline(steps = list of tuples)
def main():
df = read_data('data/ift_data.xlsx')
y = df[['IFT']]
x = df[['time_minutes', 'Gas', 'Water_content']]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
numeric_features = ['Water_content', 'time_minutes']
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['Gas']
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers = [('num',numeric_transformer,numeric_features),
('cat',categorical_transformer,categorical_features)])
    # Append the regressor to the preprocessing pipeline.
# Now we have a full prediction pipeline.
reg = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', Ridge())])
params = {'regressor__alpha':[0.1,0.3,0.5,0.8,1]}
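    # 5-fold grid search over the Ridge regularization strength alpha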
gs = GridSearchCV(reg,params,cv = 5)
gs.fit(X_train, y_train)
print('model score on training set = ',gs.score(X_train, y_train))
print('model score on test set =',gs.score(X_test,y_test) )
def read_data(path):
data = pd.read_excel(path)
return data
if __name__ == "__main__":
    main()
| 30.086207 | 106 | 0.668195 |
7941b844333b6719b7ed592b0a195a10f93a336d | 319 | py | Python | algo/bitwise/bit_sign.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | algo/bitwise/bit_sign.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | algo/bitwise/bit_sign.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | # Get sign with bitwise operations only.
def bit_sign(d, width=32):
    print("d: {0:b}".format(d))
    # Python ints have no fixed sign bit, so assume a two's-complement width
    # (default 32 bits) and test its top bit: True means non-negative.
    sign = ((d >> (width - 1)) & 1) == 0
    return sign
def sign(x):
return 1*(x>0)
#return 1-(x<=0)
if __name__ == "__main__":
d = -15
print(bit_sign(d))
| 17.722222 | 51 | 0.532915 |
7941b867646f98bfe651ab42e8290f3233aa457e | 1,545 | py | Python | pyEX/stocks/fundamentalValuations.py | adamklaff/pyEX | 74a4cfa5978ccff95261aeb54f526dedc579aa6b | [
"Apache-2.0"
] | 335 | 2017-11-06T00:45:41.000Z | 2022-03-14T10:17:36.000Z | pyEX/stocks/fundamentalValuations.py | adamklaff/pyEX | 74a4cfa5978ccff95261aeb54f526dedc579aa6b | [
"Apache-2.0"
] | 193 | 2018-02-11T21:39:35.000Z | 2022-02-25T15:46:38.000Z | pyEX/stocks/fundamentalValuations.py | adamklaff/pyEX | 74a4cfa5978ccff95261aeb54f526dedc579aa6b | [
"Apache-2.0"
] | 92 | 2017-11-10T08:09:35.000Z | 2022-02-16T19:27:46.000Z | # *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import _quoteSymbols, _raiseIfNotStr, _timeseriesWrapper, _expire, _UTC
from ..timeseries import timeSeries
@_expire(hour=8, tz=_UTC)
def fundamentalValuations(
symbol="",
frequency="",
token="",
version="stable",
filter="",
format="json",
**timeseries_kwargs
):
"""Fundamental Valuations
Args:
symbol (str): Symbol to look up
frequency (str): Optional.
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="fundamental_valuations",
key=symbol,
subkey=frequency,
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@wraps(fundamentalValuations)
def fundamentalValuationsDF(*args, **kwargs):
return pd.DataFrame(fundamentalValuations(*args, **kwargs))
| 26.186441 | 85 | 0.632362 |
7941ba0707653b5c782dd4931d5840c9e40d34f1 | 461 | py | Python | saveDB.py | chenmengchieh2012/ESPCentralController | 1526da22033e476332514350ff10eb8459445ed0 | [
"MIT"
] | null | null | null | saveDB.py | chenmengchieh2012/ESPCentralController | 1526da22033e476332514350ff10eb8459445ed0 | [
"MIT"
] | null | null | null | saveDB.py | chenmengchieh2012/ESPCentralController | 1526da22033e476332514350ff10eb8459445ed0 | [
"MIT"
] | null | null | null | import os, shutil
from datetime import datetime
print("My working path is ", os.getcwd())
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d_%H-%M")
originName = 'esp-rssi-measurements-sqlite.db'
newName = 'esp-rssi-measurements-sqlite_'+timestamp+'.db'
print("originName: ", originName)
print("newName: ", newName)
os.rename(os.getcwd()+'/'+originName, os.getcwd()+'/'+newName)
shutil.move(os.getcwd()+'/'+newName, os.getcwd()+'/DB_records/'+newName) | 28.8125 | 72 | 0.70282 |
7941ba169a839e8d936f22b731b9cca847a88545 | 1,182 | py | Python | quantizer.py | javigm98/Mejorando-el-Aprendizaje-Automatico | fefdbb5c3b3826230738b3e5fbd76f64e701d344 | [
"MIT"
] | null | null | null | quantizer.py | javigm98/Mejorando-el-Aprendizaje-Automatico | fefdbb5c3b3826230738b3e5fbd76f64e701d344 | [
"MIT"
] | null | null | null | quantizer.py | javigm98/Mejorando-el-Aprendizaje-Automatico | fefdbb5c3b3826230738b3e5fbd76f64e701d344 | [
"MIT"
] | null | null | null | import sys
dataset_dir = sys.argv[1]
h5_dir = sys.argv[2]
tflite_dir= sys.argv[3]
images = []
import numpy as np
with open(dataset_dir, 'rb') as f:
for _ in range(500):
images.append(np.load(f))
print("Loaded images shape: ", images[20].shape)
import tensorflow as tf
def representative_data_gen():
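    # Yield 100 single-image float32 batches; the converter uses them to
    # calibrate activation ranges for full-integer quantization.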
for data in tf.data.Dataset.from_tensor_slices((images)).batch(1).take(100):
yield[tf.dtypes.cast(data, tf.float32)]
model = tf.keras.models.load_model(h5_dir, custom_objects={'tf':tf})
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant= converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
open(tflite_dir, "wb").write(tflite_model_quant)
| 33.771429 | 80 | 0.769882 |
7941ba65b8a6f6b10e87d8c8c87e982f52d85178 | 68 | py | Python | ramda/equals.py | Rafi993/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | 1 | 2018-08-05T04:41:14.000Z | 2018-08-05T04:41:14.000Z | ramda/equals.py | Rafi993/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | 3 | 2018-06-12T18:42:05.000Z | 2018-07-23T11:50:25.000Z | ramda/equals.py | slavaGanzin/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | null | null | null | from ramda.curry import curry
equals = curry(lambda x, y: x == y)
| 13.6 | 35 | 0.676471 |
7941bad146678f1a9925d97a12840e01b67645dd | 479 | py | Python | tests/conftest.py | TomAB99/kima | 15e13159dd7bdaa4053e3157cbf5f562ee988461 | [
"MIT"
] | 14 | 2017-11-13T19:45:35.000Z | 2021-09-23T14:15:10.000Z | tests/conftest.py | TomAB99/kima | 15e13159dd7bdaa4053e3157cbf5f562ee988461 | [
"MIT"
] | 67 | 2017-11-03T13:57:54.000Z | 2021-12-03T13:54:51.000Z | tests/conftest.py | TomAB99/kima | 15e13159dd7bdaa4053e3157cbf5f562ee988461 | [
"MIT"
] | 15 | 2017-11-09T11:31:57.000Z | 2022-03-03T11:02:03.000Z | import pytest
def pytest_addoption(parser):
parser.addoption("--slow", action="store_true",
default=False, help="run slow tests")
def pytest_collection_modifyitems(config, items):
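    # Mark every test tagged "slow" as skipped unless --slow was passed.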
if config.getoption("--slow"):
        # --slow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow) | 36.846154 | 68 | 0.647182 |
7941bc07421ffb7ff7e21f82e463bbb1efeb61bb | 1,079 | py | Python | pyvisdk/do/invalid_network_in_type.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/invalid_network_in_type.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/invalid_network_in_type.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def InvalidNetworkInType(vim, *args, **kwargs):
    '''The network specified in the property does not exist.'''
obj = vim.client.factory.create('{urn:vim25}InvalidNetworkInType')
# do some validation checking...
if (len(args) + len(kwargs)) < 9:
raise IndexError('Expected at least 10 arguments got: %d' % len(args))
required = [ 'category', 'id', 'label', 'type', 'value', 'dynamicProperty', 'dynamicType',
'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| 31.735294 | 124 | 0.599629 |
7941bcb2d2218e99528141883dfb7383be5c59f5 | 19,285 | py | Python | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/18-sender_receiver_1.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/18-sender_receiver_1.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/18-sender_receiver_1.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
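    # Encode an enum with ceil(log2(enum_size)) boolean state variables and
    # return the variables plus, for every value, a predicate over both the
    # current and the next state.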
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
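    # Timed sender: alternates between `send` and `wait_ack`; each new message
    # gets a fresh msg_id and must be acknowledged (in_c = msg_id) before the
    # clock c reaches the timeout, otherwise the wait is retried with a
    # strictly larger timeout.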
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
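    # Receiver: stays in `wait` while the incoming id equals the last value it
    # acknowledged; on a new id it moves to `work`, copying the id to out_c.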
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
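    # Build a set of Hint automata: each Location carries an invariant over the
    # current state and a progress condition over the next-state symbols.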
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r0))
hint = Hint("h_delta0", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i0))
hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(1, mgr.Not(x_s_evt))
loc1 = Location(env, mgr.Not(s_evt))
loc1.set_progress(0, x_s_evt)
hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
hint = Hint("h_delta2", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| 38.037475 | 89 | 0.579051 |
7941be46bcd117d74bb0ee1fa0c4e7674edd0d6d | 61,925 | py | Python | beets/ui/commands.py | Ruin0x11/beets | 0077c0514b2bc515336cc00e58f5a239d4d032f4 | [
"MIT"
] | null | null | null | beets/ui/commands.py | Ruin0x11/beets | 0077c0514b2bc515336cc00e58f5a239d4d032f4 | [
"MIT"
] | null | null | null | beets/ui/commands.py | Ruin0x11/beets | 0077c0514b2bc515336cc00e58f5a239d4d032f4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
from __future__ import division, absolute_import, print_function
import os
import re
from platform import python_version
from collections import namedtuple, Counter
from itertools import chain
import beets
from beets import ui
from beets.ui import print_, input_, decargs, show_path_changes
from beets import autotag
from beets.autotag import Recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path, \
MoveOperation
from beets import library
from beets import config
from beets import logging
import six
from . import _store_dict
VARIOUS_ARTISTS = u'Various Artists'
PromptChoice = namedtuple('PromptChoice', ['short', 'long', 'callback'])
# Global logger.
log = logging.getLogger('beets')
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError(u'No matching albums found.')
elif not album and not items:
raise ui.UserError(u'No matching items found.')
return items, albums
# fields: Shows a list of available fields for queries and format strings.
def _print_keys(query):
"""Given a SQLite query result, print the `key` field of each
returned row, with indentation of 2 spaces.
"""
for row in query:
print_(u' ' * 2 + row['key'])
def fields_func(lib, opts, args):
def _print_rows(names):
names.sort()
print_(u' ' + u'\n '.join(names))
print_(u"Item fields:")
_print_rows(library.Item.all_keys())
print_(u"Album fields:")
_print_rows(library.Album.all_keys())
with lib.transaction() as tx:
# The SQL uses the DISTINCT to get unique values from the query
unique_fields = 'SELECT DISTINCT key FROM (%s)'
print_(u"Item flexible attributes:")
_print_keys(tx.query(unique_fields % library.Item._flex_table))
print_(u"Album flexible attributes:")
_print_keys(tx.query(unique_fields % library.Album._flex_table))
fields_cmd = ui.Subcommand(
'fields',
help=u'show fields available for queries and format strings'
)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# help: Print help text for commands
class HelpCommand(ui.Subcommand):
def __init__(self):
super(HelpCommand, self).__init__(
'help', aliases=('?',),
help=u'give detailed help on a specific sub-command',
)
def func(self, lib, opts, args):
if args:
cmdname = args[0]
helpcommand = self.root_parser._subcommand_for_name(cmdname)
if not helpcommand:
raise ui.UserError(u"unknown command '{0}'".format(cmdname))
helpcommand.print_help()
else:
self.root_parser.print_help()
default_commands.append(HelpCommand())
# import: Autotagger and importer.
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
disambig = []
if info.data_source and info.data_source != 'MusicBrainz':
disambig.append(info.data_source)
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums and info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(six.text_type(info.year))
if info.country:
disambig.append(info.country)
if info.label:
disambig.append(info.label)
if info.catalognum:
disambig.append(info.catalognum)
if info.albumdisambig:
disambig.append(info.albumdisambig)
if disambig:
return u', '.join(disambig)
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
out = u'%.1f%%' % ((1 - dist) * 100)
if dist <= config['match']['strong_rec_thresh'].as_number():
out = ui.colorize('text_success', out)
elif dist <= config['match']['medium_rec_thresh'].as_number():
out = ui.colorize('text_warning', out)
else:
out = ui.colorize('text_error', out)
return out
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace('album_', '')
key = key.replace('track_', '')
key = key.replace('_', ' ')
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ['...']
return ui.colorize('text_warning', u'(%s)' % ', '.join(penalties))
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
def show_album(artist, album):
if artist:
album_description = u' %s - %s' % (artist, album)
elif album:
album_description = u' %s' % album
else:
album_description = u' (unknown album)'
print_(album_description)
def format_index(track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums and mediums > 1:
return u'{0}-{1}'.format(medium, medium_index)
else:
return six.text_type(medium_index if medium_index is not None
else index)
else:
return six.text_type(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
(cur_album != match.info.album and
match.info.album != VARIOUS_ARTISTS):
artist_l, artist_r = cur_artist or '', match.info.artist
album_l, album_r = cur_album or '', match.info.album
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = u'', u''
if config['artist_credit']:
artist_r = match.info.artist_credit
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
album_l, album_r = ui.colordiff(album_l, album_r)
print_(u"Correcting tags from:")
show_album(artist_l, album_l)
print_(u"To:")
show_album(artist_r, album_r)
else:
print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info))
# Data URL.
if match.info.data_url:
print_(u'URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append(u'(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig))
print_(' '.join(info))
# Tracks.
pairs = list(match.mapping.items())
pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
# characters) of the uncolorized LHS.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# Medium number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
media = match.info.media or 'Media'
if match.info.mediums > 1 and track_info.disctitle:
lhs = u'%s %s: %s' % (media, track_info.medium,
track_info.disctitle)
elif match.info.mediums > 1:
lhs = u'%s %s' % (media, track_info.medium)
elif track_info.disctitle:
lhs = u'%s: %s' % (media, track_info.disctitle)
else:
lhs = None
if lhs:
lines.append((lhs, u'', 0))
medium, disctitle = track_info.medium, track_info.disctitle
# Titles.
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
lhs, rhs = cur_title, new_title
else:
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
cur_track, new_track = format_index(item), format_index(track_info)
if cur_track != new_track:
if item.track in (track_info.index, track_info.medium_index):
color = 'text_highlight_minor'
else:
color = 'text_highlight'
templ = ui.colorize(color, u' (#{0})')
lhs += templ.format(cur_track)
rhs += templ.format(new_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
templ = ui.colorize('text_highlight', u' ({0})')
lhs += templ.format(cur_length)
rhs += templ.format(new_length)
lhs_width += len(cur_length) + 3
# Penalties.
penalties = penalty_string(match.distance.tracks[track_info])
if penalties:
rhs += ' %s' % penalties
if lhs != rhs:
lines.append((u' * %s' % lhs, rhs, lhs_width))
elif config['import']['detail']:
lines.append((u' * %s' % lhs, '', lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if not rhs:
print_(lhs)
elif max_width > col_width:
print_(u'%s ->\n %s' % (lhs, rhs))
else:
pad = max_width - lhs_width
print_(u'%s%s -> %s' % (lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
if match.extra_tracks:
print_(u'Missing tracks ({0}/{1} - {2:.1%}):'.format(
len(match.extra_tracks),
len(match.info.tracks),
len(match.extra_tracks) / len(match.info.tracks)
))
pad_width = max(len(track_info.title) for track_info in
match.extra_tracks)
for track_info in match.extra_tracks:
line = u' ! {0: <{width}} (#{1: >2})'.format(track_info.title,
format_index(track_info),
width=pad_width)
if track_info.length:
line += u' (%s)' % ui.human_seconds_short(track_info.length)
print_(ui.colorize('text_warning', line))
if match.extra_items:
print_(u'Unmatched tracks ({0}):'.format(len(match.extra_items)))
pad_width = max(len(item.title) for item in match.extra_items)
for item in match.extra_items:
line = u' ! {0: <{width}} (#{1: >2})'.format(item.title,
format_index(item),
width=pad_width)
if item.length:
line += u' (%s)' % ui.human_seconds_short(item.length)
print_(ui.colorize('text_warning', line))
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
cur_artist, new_artist = item.artist, match.info.artist
cur_title, new_title = item.title, match.info.title
if cur_artist != new_artist or cur_title != new_title:
cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
cur_title, new_title = ui.colordiff(cur_title, new_title)
print_(u"Correcting track tags from:")
print_(u" %s - %s" % (cur_artist, cur_title))
print_(u"To:")
print_(u" %s - %s" % (new_artist, new_title))
else:
print_(u"Tagging track: %s - %s" % (cur_artist, cur_title))
# Data URL.
if match.info.data_url:
print_(u'URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append(u'(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('text_highlight_minor', u'(%s)' % disambig))
print_(' '.join(info))
def summarize_items(items, singleton):
"""Produces a brief summary line describing a set of items. Used for
manually resolving duplicates during import.
`items` is a list of `Item` objects. `singleton` indicates whether
    this is an album or single-item import (if the latter, then `items`
should only have one element).
"""
summary_parts = []
if not singleton:
summary_parts.append(u"{0} items".format(len(items)))
format_counts = {}
for item in items:
format_counts[item.format] = format_counts.get(item.format, 0) + 1
if len(format_counts) == 1:
# A single format.
summary_parts.append(items[0].format)
else:
# Enumerate all the formats by decreasing frequencies:
for fmt, count in sorted(
format_counts.items(),
key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0])
):
summary_parts.append('{0} {1}'.format(fmt, count))
if items:
average_bitrate = sum([item.bitrate for item in items]) / len(items)
total_duration = sum([item.length for item in items])
total_filesize = sum([item.filesize for item in items])
summary_parts.append(u'{0}kbps'.format(int(average_bitrate / 1000)))
summary_parts.append(ui.human_seconds_short(total_duration))
summary_parts.append(ui.human_bytes(total_filesize))
return u', '.join(summary_parts)
def _summary_judgment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return None if the user should be queried.
Otherwise, returns an action. May also print to the console if a
summary judgment is made.
"""
if config['import']['quiet']:
if rec == Recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
})
elif config['import']['timid']:
return None
elif rec == Recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
'ask': None,
})
else:
return None
if action == importer.action.SKIP:
print_(u'Skipping.')
elif action == importer.action.ASIS:
print_(u'Importing as-is.')
return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None,
choices=[]):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
objects depending on `singleton`. for albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
`choices` is a list of `PromptChoice`s to be used in each prompt.
Returns one of the following:
* the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object)
* a chosen `PromptChoice` from `choices`
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Build helper variables for the prompt choices.
choice_opts = tuple(c.long for c in choices)
choice_actions = {c.short: c for c in choices}
# Zero candidates.
if not candidates:
if singleton:
print_(u"No matching recordings found.")
else:
print_(u"No matching release found for {0} tracks."
.format(itemcount))
print_(u'For help, see: '
u'https://beets.readthedocs.org/en/latest/faq.html#nomatch')
sel = ui.input_options(choice_opts)
if sel in choice_actions:
return choice_actions[sel]
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != Recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= Recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_(u'Finding tags for {0} "{1} - {2}".'.format(
u'track' if singleton else u'album',
item.artist if singleton else cur_artist,
item.title if singleton else cur_album,
))
print_(u'Candidates:')
for i, match in enumerate(candidates):
# Index, metadata, and distance.
line = [
u'{0}.'.format(i + 1),
u'{0} - {1}'.format(
match.info.artist,
match.info.title if singleton else match.info.album,
),
u'({0})'.format(dist_string(match.distance)),
]
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
line.append(penalties)
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
line.append(ui.colorize('text_highlight_minor',
u'(%s)' % disambig))
print_(u' '.join(line))
# Ask the user for a choice.
sel = ui.input_options(choice_opts,
numrange=(1, len(candidates)))
if sel == u'm':
pass
elif sel in choice_actions:
return choice_actions[sel]
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == Recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
default = config['import']['default_action'].as_choice({
u'apply': u'a',
u'skip': u's',
u'asis': u'u',
u'none': None,
})
if default is None:
require = True
# Bell ring when user interaction is needed.
if config['import']['bell']:
ui.print_(u'\a', end=u'')
sel = ui.input_options((u'Apply', u'More candidates') + choice_opts,
require=require, default=default)
if sel == u'a':
return match
elif sel in choice_actions:
return choice_actions[sel]
def manual_search(session, task):
"""Get a new `Proposal` using manual search criteria.
Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_(u'Artist:').strip()
name = input_(u'Album:' if task.is_album else u'Track:').strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, artist, name
)
return prop
else:
return autotag.tag_item(task.item, artist, name)
def manual_id(session, task):
"""Get a new `Proposal` using a manually-entered ID.
Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = u'Enter {0} ID:'.format(u'release' if task.is_album
else u'recording')
search_id = input_(prompt).strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, search_ids=search_id.split()
)
return prop
else:
return autotag.tag_item(task.item, search_ids=search_id.split())
def abort_action(session, task):
"""A prompt choice callback that aborts the importer.
"""
raise importer.ImportAbort()
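# Sketch (assumption, not taken from the original source): a plugin can add an
# extra prompt choice by handling the before_choose_candidate event and
# returning PromptChoice objects whose callback receives (session, task):
#   def before_choose_candidate_listener(session, task):
#       return [PromptChoice(u'x', u'eXample',
#                            lambda s, t: importer.action.SKIP)]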
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal.
"""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
print_(displayable_path(task.paths, u'\n') +
u' ({0} items)'.format(len(task.items)))
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
while True:
# Ask for a choice from the user. The result of
# `choose_candidate` may be an `importer.action`, an
# `AlbumMatch` object for a specific selection, or a
# `PromptChoice`.
choices = self._get_choices(task)
choice = choose_candidate(
task.candidates, False, task.rec, task.cur_artist,
task.cur_album, itemcount=len(task.items), choices=choices
)
# Basic choices that require no more action here.
if choice in (importer.action.SKIP, importer.action.ASIS):
# Pass selection to main control flow.
return choice
# Plugin-provided choices. We invoke the associated callback
# function.
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
# Use the new candidates and continue around the loop.
task.candidates = post_choice.candidates
task.rec = post_choice.recommendation
# Otherwise, we have a specific match selection.
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(displayable_path(task.item.path))
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choices = self._get_choices(task)
choice = choose_candidate(candidates, True, rec, item=task.item,
choices=choices)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
candidates = post_choice.candidates
rec = post_choice.recommendation
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task, found_duplicates):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warning(u"This {0} is already in the library!",
(u"album" if task.is_album else u"item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
log.info(u'Skipping.')
sel = u's'
else:
# Print some detail about the existing and new items so the
# user can make an informed decision.
for duplicate in found_duplicates:
print_(u"Old: " + summarize_items(
list(duplicate.items()) if task.is_album else [duplicate],
not task.is_album,
))
print_(u"New: " + summarize_items(
task.imported_items(),
not task.is_album,
))
sel = ui.input_options(
(u'Skip new', u'Keep both', u'Remove old', u'Merge all')
)
if sel == u's':
# Skip new.
task.set_choice(importer.action.SKIP)
elif sel == u'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == u'r':
# Remove old.
task.should_remove_duplicates = True
elif sel == u'm':
task.should_merge_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn(u"Import of the directory:\n{0}\n"
u"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
def _get_choices(self, task):
"""Get the list of prompt choices that should be presented to the
user. This consists of both built-in choices and ones provided by
plugins.
The `before_choose_candidate` event is sent to the plugins, with
session and task as its parameters. Plugins are responsible for
checking the right conditions and returning a list of `PromptChoice`s,
which is flattened and checked for conflicts.
If two or more choices have the same short letter, a warning is
        emitted and all but one of the choices are discarded, giving preference
to the default importer choices.
Returns a list of `PromptChoice`s.
"""
# Standard, built-in choices.
choices = [
PromptChoice(u's', u'Skip',
lambda s, t: importer.action.SKIP),
PromptChoice(u'u', u'Use as-is',
lambda s, t: importer.action.ASIS)
]
if task.is_album:
choices += [
PromptChoice(u't', u'as Tracks',
lambda s, t: importer.action.TRACKS),
PromptChoice(u'g', u'Group albums',
lambda s, t: importer.action.ALBUMS),
]
choices += [
PromptChoice(u'e', u'Enter search', manual_search),
PromptChoice(u'i', u'enter Id', manual_id),
PromptChoice(u'b', u'aBort', abort_action),
]
# Send the before_choose_candidate event and flatten list.
extra_choices = list(chain(*plugins.send('before_choose_candidate',
session=self, task=task)))
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = [
PromptChoice(u'a', u'Apply', None),
] + choices + extra_choices
# Check for conflicts.
short_letters = [c.short for c in all_choices]
if len(short_letters) != len(set(short_letters)):
# Duplicate short letter has been found.
duplicates = [i for i, count in Counter(short_letters).items()
if count > 1]
for short in duplicates:
# Keep the first of the choices, removing the rest.
dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]:
log.warning(u"Prompt choice '{0}' removed due to conflict "
u"with '{1}' (short letter: '{2}')",
c.long, dup_choices[0].long, c.short)
extra_choices.remove(c)
return choices + extra_choices
# The import command.
def import_files(lib, paths, query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check the user-specified directories.
for path in paths:
if not os.path.exists(syspath(normpath(path))):
raise ui.UserError(u'no such file or directory: {0}'.format(
displayable_path(path)))
# Check parameter consistency.
if config['import']['quiet'] and config['import']['timid']:
raise ui.UserError(u"can't be both quiet and timid")
# Open the log.
if config['import']['log'].get() is not None:
logpath = syspath(config['import']['log'].as_filename())
try:
loghandler = logging.FileHandler(logpath)
except IOError:
raise ui.UserError(u"could not open log file for writing: "
u"{0}".format(displayable_path(logpath)))
else:
loghandler = None
# Never ask for input in quiet mode.
if config['import']['resume'].get() == 'ask' and \
config['import']['quiet']:
config['import']['resume'] = False
session = TerminalImportSession(lib, loghandler, paths, query)
session.run()
# Emit event.
plugins.send('import', lib=lib, paths=paths)
def import_func(lib, opts, args):
config['import'].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config['import']['move'] = False
if opts.library:
query = decargs(args)
paths = []
else:
query = None
paths = args
if not paths:
raise ui.UserError(u'no path specified')
# On Python 2, we get filenames as raw bytes, which is what we
# need. On Python 3, we need to undo the "helpful" conversion to
# Unicode strings to get the real bytestring filename.
if not six.PY2:
paths = [p.encode(util.arg_encoding(), 'surrogateescape')
for p in paths]
import_files(lib, paths, query)
import_cmd = ui.Subcommand(
u'import', help=u'import new music', aliases=(u'imp', u'im')
)
import_cmd.parser.add_option(
u'-c', u'--copy', action='store_true', default=None,
help=u"copy tracks into library directory (default)"
)
import_cmd.parser.add_option(
u'-C', u'--nocopy', action='store_false', dest='copy',
help=u"don't copy tracks (opposite of -c)"
)
import_cmd.parser.add_option(
u'-m', u'--move', action='store_true', dest='move',
help=u"move tracks into the library (overrides -c)"
)
import_cmd.parser.add_option(
u'-w', u'--write', action='store_true', default=None,
help=u"write new metadata to files' tags (default)"
)
import_cmd.parser.add_option(
u'-W', u'--nowrite', action='store_false', dest='write',
help=u"don't write metadata (opposite of -w)"
)
import_cmd.parser.add_option(
u'-a', u'--autotag', action='store_true', dest='autotag',
help=u"infer tags for imported files (default)"
)
import_cmd.parser.add_option(
u'-A', u'--noautotag', action='store_false', dest='autotag',
help=u"don't infer tags for imported files (opposite of -a)"
)
import_cmd.parser.add_option(
u'-p', u'--resume', action='store_true', default=None,
help=u"resume importing if interrupted"
)
import_cmd.parser.add_option(
u'-P', u'--noresume', action='store_false', dest='resume',
help=u"do not try to resume importing"
)
import_cmd.parser.add_option(
u'-q', u'--quiet', action='store_true', dest='quiet',
help=u"never prompt for input: skip albums instead"
)
import_cmd.parser.add_option(
u'-l', u'--log', dest='log',
help=u'file to log untaggable albums for later review'
)
import_cmd.parser.add_option(
u'-s', u'--singletons', action='store_true',
help=u'import individual tracks instead of full albums'
)
import_cmd.parser.add_option(
u'-t', u'--timid', dest='timid', action='store_true',
help=u'always confirm all actions'
)
import_cmd.parser.add_option(
u'-L', u'--library', dest='library', action='store_true',
help=u'retag items matching a query'
)
import_cmd.parser.add_option(
u'-i', u'--incremental', dest='incremental', action='store_true',
help=u'skip already-imported directories'
)
import_cmd.parser.add_option(
u'-I', u'--noincremental', dest='incremental', action='store_false',
help=u'do not skip already-imported directories'
)
import_cmd.parser.add_option(
u'--from-scratch', dest='from_scratch', action='store_true',
help=u'erase existing metadata before applying new metadata'
)
import_cmd.parser.add_option(
u'--flat', dest='flat', action='store_true',
help=u'import an entire tree as a single album'
)
import_cmd.parser.add_option(
u'-g', u'--group-albums', dest='group_albums', action='store_true',
help=u'group tracks in a folder into separate albums'
)
import_cmd.parser.add_option(
u'--pretend', dest='pretend', action='store_true',
help=u'just print the files to import'
)
import_cmd.parser.add_option(
u'-S', u'--search-id', dest='search_ids', action='append',
metavar='ID',
help=u'restrict matching to a specific metadata backend ID'
)
import_cmd.parser.add_option(
u'--set', dest='set_fields', action='callback',
callback=_store_dict,
metavar='FIELD=VALUE',
help=u'set the given fields to the supplied values'
)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt=u''):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
if album:
for album in lib.albums(query):
ui.print_(format(album, fmt))
else:
for item in lib.items(query):
ui.print_(format(item, fmt))
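# Illustrative call (assumes `lib` is an open library object):
#   list_items(lib, [u'artist:beatles'], album=False,
#              fmt=u'$artist - $title')
# prints one formatted line per matching item.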
def list_func(lib, opts, args):
list_items(lib, decargs(args), opts.album)
list_cmd = ui.Subcommand(u'list', help=u'query the library', aliases=(u'ls',))
list_cmd.parser.usage += u"\n" \
u'Example: %prog -f \'$album: $title\' artist:beatles'
list_cmd.parser.add_all_common_options()
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend, fields):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
:param fields: The fields to be stored. If not specified, all fields will
      be stored.
"""
with lib.transaction():
if move and fields is not None and 'path' not in fields:
# Special case: if an item needs to be moved, the path field has to
            # be updated; otherwise the new path will not be reflected in the
# database.
fields.append('path')
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
ui.print_(format(item))
ui.print_(ui.colorize('text_error', u' deleted'))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(u'skipping {0} because mtime is up to date ({1})',
displayable_path(item.path), item.mtime)
continue
# Read new data.
try:
item.read()
except library.ReadError as exc:
log.error(u'error reading {0}: {1}',
displayable_path(item.path), exc)
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard(u'albumartist')
# Check for and display changes.
changed = ui.show_model_changes(
item,
fields=fields or library.Item._media_fields)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move(store=False)
item.store(fields=fields)
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store(fields=fields)
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug(u'emptied album {0}', album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = first_item[key]
album.store(fields=fields)
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug(u'moving album {0}', album_id)
# Manually moving and storing the album.
items = list(album.items())
for item in items:
item.move(store=False, with_album=False)
item.store(fields=fields)
album.move(store=False)
album.store(fields=fields)
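# Illustrative call (arguments are examples only): re-read tags for matching
# items, move any renamed files, and only preview the changes:
#   update_items(lib, [u'genre:jazz'], album=False, move=True, pretend=True,
#                fields=None)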
def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, ui.should_move(opts.move),
opts.pretend, opts.fields)
update_cmd = ui.Subcommand(
u'update', help=u'update the library', aliases=(u'upd', u'up',)
)
update_cmd.parser.add_album_option()
update_cmd.parser.add_format_option()
update_cmd.parser.add_option(
u'-m', u'--move', action='store_true', dest='move',
help=u"move files in the library directory"
)
update_cmd.parser.add_option(
u'-M', u'--nomove', action='store_false', dest='move',
help=u"don't move files in library"
)
update_cmd.parser.add_option(
u'-p', u'--pretend', action='store_true',
help=u"show all changes but do nothing"
)
update_cmd.parser.add_option(
u'-F', u'--field', default=None, action='append', dest='fields',
help=u'list of fields to update'
)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete, force):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
# Confirm file removal if not forcing removal.
if not force:
# Prepare confirmation with user.
print_()
if delete:
fmt = u'$path - $title'
prompt = u'Really DELETE %i file%s (y/n)?' % \
(len(items), 's' if len(items) > 1 else '')
else:
fmt = u''
prompt = u'Really remove %i item%s from the library (y/n)?' % \
(len(items), 's' if len(items) > 1 else '')
# Show all the items.
for item in items:
ui.print_(format(item, fmt))
# Confirm with user.
if not ui.input_yn(prompt, True):
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in (albums if album else items):
obj.remove(delete)
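# Illustrative call (arguments are examples only): remove matching albums from
# the library, delete the files, and ask for confirmation first:
#   remove_items(lib, [u'year:1990..1999'], album=True, delete=True,
#                force=False)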
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete, opts.force)
remove_cmd = ui.Subcommand(
u'remove', help=u'remove matching items from the library', aliases=(u'rm',)
)
remove_cmd.parser.add_option(
u"-d", u"--delete", action="store_true",
help=u"also remove files from disk"
)
remove_cmd.parser.add_option(
u"-f", u"--force", action="store_true",
help=u"do not ask when removing items"
)
remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
album_artists = set()
for item in items:
if exact:
try:
total_size += os.path.getsize(syspath(item.path))
except OSError as exc:
log.info(u'could not get size of {}: {}', item.path, exc)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
album_artists.add(item.albumartist)
if item.album_id:
albums.add(item.album_id)
size_str = u'' + ui.human_bytes(total_size)
if exact:
size_str += u' ({0} bytes)'.format(total_size)
print_(u"""Tracks: {0}
Total time: {1}{2}
{3}: {4}
Artists: {5}
Albums: {6}
Album artists: {7}""".format(
total_items,
ui.human_seconds(total_time),
u' ({0:.2f} seconds)'.format(total_time) if exact else '',
u'Total size' if exact else u'Approximate total size',
size_str,
len(artists),
len(albums),
len(album_artists)),
)
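# Example output shape (all values are illustrative only):
#   Tracks: 1204
#   Total time: 3.5 days
#   Approximate total size: 51.3 GiB
#   Artists: 167
#   Albums: 95
#   Album artists: 81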
def stats_func(lib, opts, args):
show_stats(lib, decargs(args), opts.exact)
stats_cmd = ui.Subcommand(
u'stats', help=u'show statistics about the library or a query'
)
stats_cmd.parser.add_option(
u'-e', u'--exact', action='store_true',
help=u'exact size and time'
)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
print_(u'beets version %s' % beets.__version__)
print_(u'Python version {}'.format(python_version()))
# Show plugins.
names = sorted(p.name for p in plugins.find_plugins())
if names:
print_(u'plugins:', ', '.join(names))
else:
print_(u'no plugins loaded')
version_cmd = ui.Subcommand(
u'version', help=u'output version information'
)
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
"""Modifies matching items according to user-specified assignments and
deletions.
    `mods` is a dictionary of field and value pairs indicating
assignments. `dels` is a list of fields to be deleted.
"""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
for key, value in mods.items():
mods[key] = model_cls._parse(key, value)
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_(u'Modifying {0} {1}s.'
.format(len(objs), u'album' if album else u'item'))
changed = []
for obj in objs:
if print_and_modify(obj, mods, dels) and obj not in changed:
changed.append(obj)
# Still something to do?
if not changed:
print_(u'No changes to make.')
return
# Confirm action.
if confirm:
if write and move:
extra = u', move and write tags'
elif write:
extra = u' and write tags'
elif move:
extra = u' and move'
else:
extra = u''
changed = ui.input_select_objects(
u'Really modify%s' % extra, changed,
lambda o: print_and_modify(o, mods, dels)
)
# Apply changes to database and files
with lib.transaction():
for obj in changed:
obj.try_sync(write, move)
def print_and_modify(obj, mods, dels):
"""Print the modifications to an item and return a bool indicating
whether any changes were made.
`mods` is a dictionary of fields and values to update on the object;
`dels` is a sequence of fields to delete.
"""
obj.update(mods)
for field in dels:
try:
del obj[field]
except KeyError:
pass
return ui.show_model_changes(obj)
def modify_parse_args(args):
"""Split the arguments for the modify subcommand into query parts,
assignments (field=value), and deletions (field!). Returns the result as
a three-tuple in that order.
"""
mods = {}
dels = []
query = []
for arg in args:
if arg.endswith('!') and '=' not in arg and ':' not in arg:
dels.append(arg[:-1]) # Strip trailing !.
elif '=' in arg and ':' not in arg.split('=', 1)[0]:
key, val = arg.split('=', 1)
mods[key] = val
else:
query.append(arg)
return query, mods, dels
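# Illustrative split (assumed command-line input): the argument list
#   [u'artist:Beatles', u'genre=Rock', u'comments!']
# is parsed into
#   query == [u'artist:Beatles'], mods == {u'genre': u'Rock'},
#   dels == [u'comments']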
def modify_func(lib, opts, args):
query, mods, dels = modify_parse_args(decargs(args))
if not mods and not dels:
raise ui.UserError(u'no modifications specified')
modify_items(lib, mods, dels, query, ui.should_write(opts.write),
ui.should_move(opts.move), opts.album, not opts.yes)
modify_cmd = ui.Subcommand(
u'modify', help=u'change metadata fields', aliases=(u'mod',)
)
modify_cmd.parser.add_option(
u'-m', u'--move', action='store_true', dest='move',
help=u"move files in the library directory"
)
modify_cmd.parser.add_option(
u'-M', u'--nomove', action='store_false', dest='move',
help=u"don't move files in library"
)
modify_cmd.parser.add_option(
u'-w', u'--write', action='store_true', default=None,
help=u"write new metadata to files' tags (default)"
)
modify_cmd.parser.add_option(
u'-W', u'--nowrite', action='store_false', dest='write',
help=u"don't write metadata (opposite of -w)"
)
modify_cmd.parser.add_album_option()
modify_cmd.parser.add_format_option(target='item')
modify_cmd.parser.add_option(
u'-y', u'--yes', action='store_true',
help=u'skip confirmation'
)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album, pretend, confirm=False,
export=False):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
"""
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
num_objs = len(objs)
# Filter out files that don't need to be moved.
isitemmoved = lambda item: item.path != item.destination(basedir=dest)
isalbummoved = lambda album: any(isitemmoved(i) for i in album.items())
objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)]
num_unmoved = num_objs - len(objs)
# Report unmoved files that match the query.
unmoved_msg = u''
if num_unmoved > 0:
unmoved_msg = u' ({} already in place)'.format(num_unmoved)
copy = copy or export # Exporting always copies.
action = u'Copying' if copy else u'Moving'
act = u'copy' if copy else u'move'
entity = u'album' if album else u'item'
log.info(u'{0} {1} {2}{3}{4}.', action, len(objs), entity,
u's' if len(objs) != 1 else u'', unmoved_msg)
if not objs:
return
if pretend:
if album:
show_path_changes([(item.path, item.destination(basedir=dest))
for obj in objs for item in obj.items()])
else:
show_path_changes([(obj.path, obj.destination(basedir=dest))
for obj in objs])
else:
if confirm:
objs = ui.input_select_objects(
u'Really %s' % act, objs,
lambda o: show_path_changes(
[(o.path, o.destination(basedir=dest))]))
for obj in objs:
log.debug(u'moving: {0}', util.displayable_path(obj.path))
if export:
# Copy without affecting the database.
obj.move(operation=MoveOperation.COPY, basedir=dest,
store=False)
else:
# Ordinary move/copy: store the new path.
if copy:
obj.move(operation=MoveOperation.COPY, basedir=dest)
else:
obj.move(operation=MoveOperation.MOVE, basedir=dest)
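# Illustrative call (arguments are examples only; `dest` is assumed to be a
# normalized destination directory path): preview copying matching albums
# without touching the database:
#   move_items(lib, dest, [u'albumartist:Bob Dylan'], copy=True, album=True,
#              pretend=True)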
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(dest):
raise ui.UserError(u'no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend,
opts.timid, opts.export)
move_cmd = ui.Subcommand(
u'move', help=u'move or copy items', aliases=(u'mv',)
)
move_cmd.parser.add_option(
u'-d', u'--dest', metavar='DIR', dest='dest',
help=u'destination directory'
)
move_cmd.parser.add_option(
u'-c', u'--copy', default=False, action='store_true',
help=u'copy instead of moving'
)
move_cmd.parser.add_option(
u'-p', u'--pretend', default=False, action='store_true',
help=u'show how files would be moved, but don\'t touch anything'
)
move_cmd.parser.add_option(
u'-t', u'--timid', dest='timid', action='store_true',
help=u'always confirm all actions'
)
move_cmd.parser.add_option(
u'-e', u'--export', default=False, action='store_true',
help=u'copy without changing the database path'
)
move_cmd.parser.add_album_option()
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend, force):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
log.info(u'missing file: {0}', util.displayable_path(item.path))
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except library.ReadError as exc:
log.error(u'error reading {0}: {1}',
displayable_path(item.path), exc)
continue
# Check for and display changes.
changed = ui.show_model_changes(item, clean_item,
library.Item._media_tag_fields, force)
if (changed or force) and not pretend:
# We use `try_sync` here to keep the mtime up to date in the
# database.
item.try_sync(True, False)
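# Illustrative call (arguments are examples only): show which files would be
# rewritten for a query without changing anything on disk:
#   write_items(lib, [u'added:-1w..'], pretend=True, force=False)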
def write_func(lib, opts, args):
write_items(lib, decargs(args), opts.pretend, opts.force)
write_cmd = ui.Subcommand(u'write', help=u'write tag information to files')
write_cmd.parser.add_option(
u'-p', u'--pretend', action='store_true',
help=u"show all changes but do nothing"
)
write_cmd.parser.add_option(
u'-f', u'--force', action='store_true',
help=u"write tags even if the existing tags match the database"
)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
# In case the user config file does not exist, prepend it to the
# list.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print_(displayable_path(filename))
# Open in editor.
elif opts.edit:
config_edit()
# Dump configuration.
else:
config_out = config.dump(full=opts.defaults, redact=opts.redact)
print_(util.text_string(config_out))
def config_edit():
"""Open a program to edit the user configuration.
    An empty config file is created if no config file already exists.
"""
path = config.user_config_path()
editor = util.editor_command()
try:
if not os.path.isfile(path):
open(path, 'w+').close()
util.interactive_open([path], editor)
except OSError as exc:
message = u"Could not edit configuration: {0}".format(exc)
if not editor:
message += u". Please set the EDITOR environment variable"
raise ui.UserError(message)
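# Usage sketch (assumption about a typical environment): `beet config -e`
# resolves the editor from the environment, so e.g.
#   EDITOR=vim beet config -e
# opens the user configuration file, creating an empty one if necessary.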
config_cmd = ui.Subcommand(u'config',
help=u'show or edit the user configuration')
config_cmd.parser.add_option(
u'-p', u'--paths', action='store_true',
help=u'show files that configuration was loaded from'
)
config_cmd.parser.add_option(
u'-e', u'--edit', action='store_true',
help=u'edit user configuration with $EDITOR'
)
config_cmd.parser.add_option(
u'-d', u'--defaults', action='store_true',
help=u'include the default configuration'
)
config_cmd.parser.add_option(
u'-c', u'--clear', action='store_false',
dest='redact', default=True,
help=u'do not redact sensitive fields'
)
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print_(line, end=u'')
if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)):
log.warning(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.')
BASH_COMPLETION_PATHS = map(syspath, [
u'/etc/bash_completion',
u'/usr/share/bash-completion/bash_completion',
u'/usr/local/share/bash-completion/bash_completion',
# SmartOS
u'/opt/local/share/bash-completion/bash_completion',
# Homebrew (before bash-completion2)
u'/usr/local/etc/bash_completion',
])
def completion_script(commands):
"""Yield the full completion shell script as strings.
    ``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
base_script = os.path.join(os.path.dirname(__file__), 'completion_base.sh')
with open(base_script, 'r') as base_script:
yield util.text_string(base_script.read())
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
if re.match(r'^\w+$', alias):
aliases[alias] = name
options[name] = {u'flags': [], u'opts': []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = u'flags'
else:
option_type = u'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options['_global'] = {
u'flags': [u'-v', u'--verbose'],
u'opts':
u'-l --library -c --config -d --directory -h --help'.split(u' ')
}
# Add flags common to all commands
options['_common'] = {
u'flags': [u'-h', u'--help']
}
# Start generating the script
yield u"_beet() {\n"
# Command names
yield u" local commands='%s'\n" % ' '.join(command_names)
yield u"\n"
# Command aliases
yield u" local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield u" local alias__%s=%s\n" % (alias.replace('-', '_'), cmd)
yield u'\n'
# Fields
yield u" fields='%s'\n" % ' '.join(
set(
list(library.Item._fields.keys()) +
list(library.Album._fields.keys())
)
)
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = u' '.join(option_list)
yield u" local %s__%s='%s'\n" % (
option_type, cmd.replace('-', '_'), option_list)
yield u' _beet_dispatch\n'
yield u'}\n'
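# Usage sketch (assumes a bash shell with the bash-completion package
# available): the generated script is meant to be evaluated by the shell, e.g.
#   eval "$(beet completion)"
# or appended once to a file such as ~/.bash_completion.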
completion_cmd = ui.Subcommand(
'completion',
help=u'print shell script that provides command line completion'
)
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
| 34.212707 | 79 | 0.600501 |
7941becb8231517b90e18e24c8ae941c3cd4c93d | 5,740 | py | Python | tests/data/dummy_aea/protocols/fipa/message.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | 1 | 2021-07-25T18:50:18.000Z | 2021-07-25T18:50:18.000Z | tests/data/dummy_aea/protocols/fipa/message.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | tests/data/dummy_aea/protocols/fipa/message.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the FIPA message definition."""
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union, cast
from aea.protocols.base import Message
from aea.protocols.oef.models import Description, Query
class FIPAMessage(Message):
"""The FIPA message class."""
protocol_id = "fipa"
STARTING_MESSAGE_ID = 1
STARTING_TARGET = 0
class Performative(Enum):
"""FIPA performatives."""
CFP = "cfp"
PROPOSE = "propose"
ACCEPT = "accept"
MATCH_ACCEPT = "match_accept"
DECLINE = "decline"
INFORM = "inform"
ACCEPT_W_ADDRESS = "accept_w_address"
MATCH_ACCEPT_W_ADDRESS = "match_accept_w_address"
def __str__(self):
"""Get string representation."""
return self.value
def __init__(self, dialogue_reference: Tuple[str, str] = None,
message_id: Optional[int] = None,
target: Optional[int] = None,
performative: Optional[Union[str, Performative]] = None,
**kwargs):
"""
Initialize.
:param message_id: the message id.
:param dialogue_reference: the dialogue reference.
:param target: the message target.
:param performative: the message performative.
"""
super().__init__(message_id=message_id,
dialogue_reference=dialogue_reference,
target=target,
performative=FIPAMessage.Performative(performative),
**kwargs)
assert self.check_consistency(), "FIPAMessage initialization inconsistent."
def check_consistency(self) -> bool:
"""Check that the data is consistent."""
try:
assert self.is_set("dialogue_reference")
dialogue_reference = self.get("dialogue_reference")
assert type(dialogue_reference) == tuple
dialogue_reference = cast(Tuple, dialogue_reference)
            assert type(dialogue_reference[0]) == str and type(dialogue_reference[1]) == str
assert self.is_set("message_id")
assert type(self.get("message_id")) == int
assert self.is_set("target")
assert type(self.get("target")) == int
performative = FIPAMessage.Performative(self.get("performative"))
if performative == FIPAMessage.Performative.CFP:
assert self.is_set("query")
query = self.get("query")
assert isinstance(query, Query) or isinstance(query, bytes) or query is None
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.PROPOSE:
assert self.is_set("proposal")
proposal = self.get("proposal")
assert type(proposal) == list and all(isinstance(d, Description) or type(d) == bytes for d in proposal) # type: ignore
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.ACCEPT \
or performative == FIPAMessage.Performative.MATCH_ACCEPT \
or performative == FIPAMessage.Performative.DECLINE:
assert len(self.body) == 4
elif performative == FIPAMessage.Performative.ACCEPT_W_ADDRESS\
or performative == FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS:
assert self.is_set("address")
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.INFORM:
assert self.is_set("json_data")
json_data = self.get("json_data")
assert isinstance(json_data, dict)
assert len(self.body) == 5
else:
raise ValueError("Performative not recognized.")
except (AssertionError, ValueError, KeyError):
return False
return True
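# Illustrative construction (values are examples only) of the first message in
# a dialogue; `query` may be a Query, raw bytes, or None, as enforced by
# check_consistency() above:
#   msg = FIPAMessage(message_id=FIPAMessage.STARTING_MESSAGE_ID,
#                     dialogue_reference=("1", ""),
#                     target=FIPAMessage.STARTING_TARGET,
#                     performative=FIPAMessage.Performative.CFP,
#                     query=None)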
VALID_PREVIOUS_PERFORMATIVES = {
FIPAMessage.Performative.CFP: [None],
FIPAMessage.Performative.PROPOSE: [FIPAMessage.Performative.CFP],
FIPAMessage.Performative.ACCEPT: [FIPAMessage.Performative.PROPOSE],
FIPAMessage.Performative.ACCEPT_W_ADDRESS: [FIPAMessage.Performative.PROPOSE],
FIPAMessage.Performative.MATCH_ACCEPT: [FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS],
FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS: [FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS],
FIPAMessage.Performative.INFORM: [FIPAMessage.Performative.MATCH_ACCEPT, FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS, FIPAMessage.Performative.INFORM],
FIPAMessage.Performative.DECLINE: [FIPAMessage.Performative.CFP, FIPAMessage.Performative.PROPOSE, FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS]
} # type: Dict[FIPAMessage.Performative, List[Union[None, FIPAMessage.Performative]]]
| 44.84375 | 178 | 0.632753 |
7941bf0f82aa38d11193e277dd38c447c3de82b0 | 10,568 | py | Python | lib/sqlalchemy/testing/fixtures.py | smarkets/sqlalchemy | 9c96d96d0fe558d625755277e0b23a7bc82cd148 | [
"MIT"
] | 1 | 2015-11-07T12:34:26.000Z | 2015-11-07T12:34:26.000Z | lib/sqlalchemy/testing/fixtures.py | smarkets/sqlalchemy | 9c96d96d0fe558d625755277e0b23a7bc82cd148 | [
"MIT"
] | 4 | 2017-10-24T22:44:01.000Z | 2017-10-24T22:44:19.000Z | lib/sqlalchemy/testing/fixtures.py | smarkets/sqlalchemy | 9c96d96d0fe558d625755277e0b23a7bc82cd148 | [
"MIT"
] | 1 | 2021-03-25T00:26:15.000Z | 2021-03-25T00:26:15.000Z | # testing/fixtures.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import config
from . import assertions, schema
from .util import adict
from .. import util
from .engines import drop_all_tables
from .entities import BasicEntity, ComparableEntity
import sys
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
# whether or not we use unittest changes things dramatically,
# as far as how py.test collection works.
class TestBase(object):
# A sequence of database names to always run, regardless of the
# constraints below.
__whitelist__ = ()
# A sequence of requirement names matching testing.requires decorators
__requires__ = ()
# A sequence of dialect names to exclude from the test class.
__unsupported_on__ = ()
# If present, test class is only runnable for the *single* specified
# dialect. If you need multiple, use __unsupported_on__ and invert.
__only_on__ = None
# A sequence of no-arg callables. If any are True, the entire testcase is
# skipped.
__skip_if__ = None
def assert_(self, val, msg=None):
assert val, msg
# apparently a handful of tests are doing this....OK
def setup(self):
if hasattr(self, "setUp"):
self.setUp()
def teardown(self):
if hasattr(self, "tearDown"):
self.tearDown()
class TablesTest(TestBase):
# 'once', None
run_setup_bind = 'once'
# 'once', 'each', None
run_define_tables = 'once'
# 'once', 'each', None
run_create_tables = 'once'
# 'once', 'each', None
run_inserts = 'each'
# 'each', None
run_deletes = 'each'
# 'once', None
run_dispose_bind = None
bind = None
metadata = None
tables = None
other = None
@classmethod
def setup_class(cls):
cls._init_class()
cls._setup_once_tables()
cls._setup_once_inserts()
@classmethod
def _init_class(cls):
if cls.run_define_tables == 'each':
if cls.run_create_tables == 'once':
cls.run_create_tables = 'each'
assert cls.run_inserts in ('each', None)
if cls.other is None:
cls.other = adict()
if cls.tables is None:
cls.tables = adict()
if cls.bind is None:
setattr(cls, 'bind', cls.setup_bind())
if cls.metadata is None:
setattr(cls, 'metadata', sa.MetaData())
if cls.metadata.bind is None:
cls.metadata.bind = cls.bind
@classmethod
def _setup_once_inserts(cls):
if cls.run_inserts == 'once':
cls._load_fixtures()
cls.insert_data()
@classmethod
def _setup_once_tables(cls):
if cls.run_define_tables == 'once':
cls.define_tables(cls.metadata)
if cls.run_create_tables == 'once':
cls.metadata.create_all(cls.bind)
cls.tables.update(cls.metadata.tables)
def _setup_each_tables(self):
if self.run_define_tables == 'each':
self.tables.clear()
if self.run_create_tables == 'each':
drop_all_tables(self.metadata, self.bind)
self.metadata.clear()
self.define_tables(self.metadata)
if self.run_create_tables == 'each':
self.metadata.create_all(self.bind)
self.tables.update(self.metadata.tables)
elif self.run_create_tables == 'each':
drop_all_tables(self.metadata, self.bind)
self.metadata.create_all(self.bind)
def _setup_each_inserts(self):
if self.run_inserts == 'each':
self._load_fixtures()
self.insert_data()
def _teardown_each_tables(self):
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != 'each' and self.run_deletes == 'each':
for table in reversed(self.metadata.sorted_tables):
try:
table.delete().execute().close()
except sa.exc.DBAPIError as ex:
util.print_(
("Error emptying table %s: %r" % (table, ex)),
file=sys.stderr)
def setup(self):
self._setup_each_tables()
self._setup_each_inserts()
def teardown(self):
self._teardown_each_tables()
@classmethod
def _teardown_once_metadata_bind(cls):
if cls.run_create_tables:
drop_all_tables(cls.metadata, cls.bind)
if cls.run_dispose_bind == 'once':
cls.dispose_bind(cls.bind)
cls.metadata.bind = None
if cls.run_setup_bind is not None:
cls.bind = None
@classmethod
def teardown_class(cls):
cls._teardown_once_metadata_bind()
@classmethod
def setup_bind(cls):
return config.db
@classmethod
def dispose_bind(cls, bind):
if hasattr(bind, 'dispose'):
bind.dispose()
elif hasattr(bind, 'close'):
bind.close()
@classmethod
def define_tables(cls, metadata):
pass
@classmethod
def fixtures(cls):
return {}
@classmethod
def insert_data(cls):
pass
def sql_count_(self, count, fn):
self.assert_sql_count(self.bind, fn, count)
def sql_eq_(self, callable_, statements, with_sequences=None):
self.assert_sql(self.bind,
callable_, statements, with_sequences)
@classmethod
def _load_fixtures(cls):
"""Insert rows as represented by the fixtures() method."""
headers, rows = {}, {}
for table, data in cls.fixtures().items():
if len(data) < 2:
continue
if isinstance(table, util.string_types):
table = cls.tables[table]
headers[table] = data[0]
rows[table] = data[1:]
for table in cls.metadata.sorted_tables:
if table not in headers:
continue
cls.bind.execute(
table.insert(),
[dict(zip(headers[table], column_values))
for column_values in rows[table]])
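# Illustrative fixtures() return value (table and column names are assumed)
# consumed by _load_fixtures() above; the first tuple holds column names and
# the remaining tuples hold row values:
#   {
#       'users': (
#           ('id', 'name'),
#           (1, 'ed'),
#           (2, 'wendy'),
#       ),
#   }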
from sqlalchemy import event
class RemovesEvents(object):
@util.memoized_property
def _event_fns(self):
return set()
def event_listen(self, target, name, fn):
self._event_fns.add((target, name, fn))
event.listen(target, name, fn)
def teardown(self):
for key in self._event_fns:
event.remove(*key)
super_ = super(RemovesEvents, self)
if hasattr(super_, "teardown"):
super_.teardown()
class _ORMTest(object):
@classmethod
def teardown_class(cls):
sa.orm.session.Session.close_all()
sa.orm.clear_mappers()
class ORMTest(_ORMTest, TestBase):
pass
class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
# 'once', 'each', None
run_setup_classes = 'once'
# 'once', 'each', None
run_setup_mappers = 'each'
classes = None
@classmethod
def setup_class(cls):
cls._init_class()
if cls.classes is None:
cls.classes = adict()
cls._setup_once_tables()
cls._setup_once_classes()
cls._setup_once_mappers()
cls._setup_once_inserts()
@classmethod
def teardown_class(cls):
cls._teardown_once_class()
cls._teardown_once_metadata_bind()
def setup(self):
self._setup_each_tables()
self._setup_each_mappers()
self._setup_each_inserts()
def teardown(self):
sa.orm.session.Session.close_all()
self._teardown_each_mappers()
self._teardown_each_tables()
@classmethod
def _teardown_once_class(cls):
cls.classes.clear()
_ORMTest.teardown_class()
@classmethod
def _setup_once_classes(cls):
if cls.run_setup_classes == 'once':
cls._with_register_classes(cls.setup_classes)
@classmethod
def _setup_once_mappers(cls):
if cls.run_setup_mappers == 'once':
cls._with_register_classes(cls.setup_mappers)
def _setup_each_mappers(self):
if self.run_setup_mappers == 'each':
self._with_register_classes(self.setup_mappers)
@classmethod
def _with_register_classes(cls, fn):
"""Run a setup method, framing the operation with a Base class
that will catch new subclasses to be established within
the "classes" registry.
"""
cls_registry = cls.classes
class FindFixture(type):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
return type.__init__(cls, classname, bases, dict_)
class _Base(util.with_metaclass(FindFixture, object)):
pass
class Basic(BasicEntity, _Base):
pass
class Comparable(ComparableEntity, _Base):
pass
cls.Basic = Basic
cls.Comparable = Comparable
fn()
def _teardown_each_mappers(self):
# some tests create mappers in the test bodies
# and will define setup_mappers as None -
# clear mappers in any case
if self.run_setup_mappers != 'once':
sa.orm.clear_mappers()
@classmethod
def setup_classes(cls):
pass
@classmethod
def setup_mappers(cls):
pass
class DeclarativeMappedTest(MappedTest):
run_setup_classes = 'once'
run_setup_mappers = 'once'
@classmethod
def _setup_once_tables(cls):
pass
@classmethod
def _with_register_classes(cls, fn):
cls_registry = cls.classes
class FindFixtureDeclarative(DeclarativeMeta):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
return DeclarativeMeta.__init__(
cls, classname, bases, dict_)
class DeclarativeBasic(object):
__table_cls__ = schema.Table
_DeclBase = declarative_base(metadata=cls.metadata,
metaclass=FindFixtureDeclarative,
cls=DeclarativeBasic)
cls.DeclarativeBasic = _DeclBase
fn()
if cls.metadata.tables and cls.run_create_tables:
cls.metadata.create_all(config.db)
| 27.737533 | 84 | 0.612226 |
7941bf5be5f779ae35d10e625fed60c32d0e7ba9 | 1,081 | py | Python | gentool/__main__.py | TheDudeFromCI/generative-toolkit | 4a0aed629b72e6eea807dadc460afa90dd330f7f | [
"MIT"
] | null | null | null | gentool/__main__.py | TheDudeFromCI/generative-toolkit | 4a0aed629b72e6eea807dadc460afa90dd330f7f | [
"MIT"
] | null | null | null | gentool/__main__.py | TheDudeFromCI/generative-toolkit | 4a0aed629b72e6eea807dadc460afa90dd330f7f | [
"MIT"
] | null | null | null | import sys
import argparse
import torch
from ModelLoader import load_model
def main():
parser = argparse.ArgumentParser(prog='gentool')
parser.add_argument("--training", action='store_true', help="Whether or not to start the model in training mode.")
    parser.add_argument("--model", type=str, help="The model to load.")
parser.add_argument("--iterations", type=int, default=10000, help="Number of iterations to train for.")
parser.add_argument("--itr_offset", type=int, default=0, help="Iteration count offset.")
parser.add_argument("--no_cuda", action='store_true', help="Disables loading to GPU.")
opt = parser.parse_args()
cuda = not opt.no_cuda
if opt.model is None:
print('Model not defined!')
sys.exit(1)
if cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
model = load_model(opt.model, cuda)
if opt.training:
model.train()
else:
model.eval()
if opt.training:
model.fit(opt.iterations, offset=opt.itr_offset)
if __name__ == '__main__':
main()
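# Example invocation (the model name is hypothetical and depends on what
# ModelLoader can resolve):
#   python -m gentool --model my_gan --training --iterations 5000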
| 27.025 | 118 | 0.676226 |
7941c14a923233de518cd7c4d57e8b8a8943548c | 1,217 | py | Python | demos/time_tsts/bound_arr/ibeam.py | tbcole/majoranaJJ | dcf31f7786fa0a4874a940b7d8dcdd55f3921a46 | [
"MIT"
] | null | null | null | demos/time_tsts/bound_arr/ibeam.py | tbcole/majoranaJJ | dcf31f7786fa0a4874a940b7d8dcdd55f3921a46 | [
"MIT"
] | 2 | 2020-03-24T23:46:17.000Z | 2020-04-19T20:29:08.000Z | demos/time_tsts/bound_arr/ibeam.py | tbcole/majoranaJJ | dcf31f7786fa0a4874a940b7d8dcdd55f3921a46 | [
"MIT"
] | 3 | 2020-04-30T08:48:12.000Z | 2022-01-26T12:15:15.000Z | import time
import majoranaJJ.lattice.shapes as shps #lattice shapes
import majoranaJJ.junk.lattice.neighbors as nb2
import majoranaJJ.lattice.nbrs as nb #neighbor arrays
import majoranaJJ.modules.plots as plots #plotting functions
print("")
xbase = 40
xcut = 5
y1 = 10
y2 = 10
#Making I-beam lattice, nothing has changed with this method
coor = shps.ibeam(xbase, xcut, y1, y2)
NN = nb.NN_Arr(coor)
print("size: ", coor.shape[0])
print("")
###################################
#Using old method, scaled by N^2 due to a loop within a loop
start = time.time()
NNb2 = nb2.Bound_Arr(NN, coor)
end = time.time()
print("Time to create Bound_Arr with original method = {} [s]".format(end-start))
print(NNb2[0:5, :])
idx = 0
plots.lattice(idx, coor, NNb = NNb2)
print(" ")
###################################
start = time.time()
NNb = nb.Bound_Arr(coor)
end = time.time()
print("Time to create Bound_Arr with revised method = {} [s]".format( end-start))
print(NNb[0:5, :])
idx = 0
plots.lattice(idx, coor, NNb = NNb)
print(" ")
###################################
#Verifying that the new method creates the same neighbor array as the old one
for i in [0,1,2,3]:
print("Same Values? ", all(NNb[:,i] == NNb2[:,i]))
| 25.354167 | 81 | 0.636812 |
7941c16c34070a51c64bf76aab1f8c34a314ac05 | 4,979 | py | Python | lesson5.4/index.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 21 | 2018-12-11T20:07:47.000Z | 2021-11-08T13:12:32.000Z | lesson5.4/index.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 1 | 2020-07-07T21:30:02.000Z | 2020-07-08T18:16:03.000Z | lesson5.4/index.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 15 | 2018-12-12T02:32:28.000Z | 2021-11-05T20:40:10.000Z | import boto3
import numpy as np
import tensorflow as tf
import os.path
import re
from urllib.request import urlretrieve
import json
SESSION = None
strBucket = 'serverlessdeeplearning'
def handler(event, context):
global strBucket
global SESSION
if not os.path.exists('/tmp/imagenet/'):
os.makedirs('/tmp/imagenet/')
if SESSION is None:
downloadFromS3(strBucket,'imagenet/imagenet_2012_challenge_label_map_proto.pbtxt','/tmp/imagenet/imagenet_2012_challenge_label_map_proto.pbtxt')
downloadFromS3(strBucket,'imagenet/imagenet_synset_to_human_label_map.txt','/tmp/imagenet/imagenet_synset_to_human_label_map.txt')
strFile = '/tmp/imagenet/inputimage.png'
if ('queryStringParameters' in event):
if (event['queryStringParameters'] is not None):
if ('url' in event['queryStringParameters']):
urlretrieve(event['queryStringParameters']['url'], strFile)
else:
downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)
else:
downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)
else:
downloadFromS3(strBucket,'imagenet/inputimage.png',strFile)
strResult = run_inference_on_image(strFile)
return {
'statusCode': 200,
'body': json.dumps(strResult)
}
def run_inference_on_image(image):
image_data = tf.gfile.FastGFile(image, 'rb').read()
global SESSION
if SESSION is None:
SESSION = tf.InteractiveSession()
create_graph()
softmax_tensor = tf.get_default_graph().get_tensor_by_name('softmax:0')
predictions = SESSION.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-5:][::-1]
node_lookup = NodeLookup()
strResult = '%s (score = %.5f)' % (node_lookup.id_to_string(top_k[0]), predictions[top_k[0]])
vecStr = []
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
vecStr.append('%s (score = %.5f)' % (human_string, score))
return vecStr
def downloadFromS3(strBucket,strKey,strFile):
s3_client = boto3.client('s3')
s3_client.download_file(strBucket, strKey, strFile)
def getObject(strBucket,strKey):
s3_client = boto3.client('s3')
s3_response_object = s3_client.get_object(Bucket=strBucket, Key=strKey)
return s3_response_object['Body'].read()
def create_graph():
global strBucket
graph_def = tf.GraphDef()
graph_def.ParseFromString(getObject(strBucket,'imagenet/classify_image_graph_def.pb'))
_ = tf.import_graph_def(graph_def, name='')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
'/tmp/imagenet/', 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
'/tmp/imagenet/', 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
| 37.43609 | 152 | 0.66198 |
7941c1f3859c4e7787c50eb7657d338c31cfbb8d | 7,433 | py | Python | bugzoo/mgr/build.py | pdreiter/BugZoo | e164ee67ff8bd3addfcc87b5e38ff1774992196b | [
"MIT"
] | 53 | 2017-12-02T03:22:06.000Z | 2022-03-10T22:20:52.000Z | bugzoo/mgr/build.py | pdreiter/BugZoo | e164ee67ff8bd3addfcc87b5e38ff1774992196b | [
"MIT"
] | 145 | 2017-11-29T23:23:06.000Z | 2020-09-17T22:17:44.000Z | bugzoo/mgr/build.py | pdreiter/BugZoo | e164ee67ff8bd3addfcc87b5e38ff1774992196b | [
"MIT"
] | 8 | 2018-06-26T17:58:49.000Z | 2021-09-07T14:03:41.000Z | from typing import Iterator
import os
import shutil
import json
import logging
import docker
from ..core.build import BuildInstructions
from ..exceptions import ImageBuildFailed
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
class BuildManager(object):
def __init__(self, client_docker: docker.DockerClient):
self.__docker = client_docker
self.__blueprints = {}
def __getitem__(self, name: str) -> BuildInstructions:
"""
Retrieves the build instructions associated for a named Docker image.
Parameter:
name: the name of the Docker image.
Raises:
KeyError: if no build instructions for the named image have been
registered with this manager.
"""
return self.__blueprints[name]
def __iter__(self) -> Iterator[BuildInstructions]:
"""
Returns an iterator over all of the build instructions that are
registered with this server.
"""
return self.__blueprints.values().__iter__()
def register(self, blueprint: BuildInstructions) -> None:
"""
Attempts to register a blueprint for a given Docker image with this
manager.
"""
self.__blueprints[blueprint.name] = blueprint
add = register
def deregister(self, blueprint: BuildInstructions) -> None:
"""
Attempts to deregister a given blueprint from this manager.
"""
del self.__blueprints[blueprint.name]
remove = deregister
def is_installed(self, name: str) -> bool:
"""
        Indicates whether a given Docker image is installed on this server.
Parameters:
name: the name of the Docker image.
Returns:
`True` if installed; `False` if not.
"""
assert name is not None
try:
self.__docker.images.get(name)
return True
except docker.errors.ImageNotFound:
return False
def build(self,
name: str,
force: bool = False,
quiet: bool = False
) -> None:
"""
Constructs a Docker image, given by its name, using the set of build
instructions associated with that image.
Parameters:
name: the name of the Docker image.
force: if `True`, the image will be rebuilt, regardless of whether
or not it is already installed on the server. If `False` and
a (possibly outdated) version of the image has already been
built, then the build will be skipped.
quiet: used to enable and disable output from the Docker build
process.
"""
logger.debug("request to build image: %s", name)
instructions = self[name]
if instructions.depends_on:
logger.info("building dependent image: %s",
instructions.depends_on)
self.build(instructions.depends_on, force=force, quiet=quiet)
if not force and self.is_installed(instructions.name):
return
if not quiet:
logger.info("building image: %s", name)
context = instructions.abs_context
tf = os.path.join(context, '.Dockerfile')
try:
success = False
shutil.copy(instructions.filename_abs, tf)
response = self.__docker.api.build(path=context,
dockerfile='.Dockerfile',
tag=name,
# pull=force,
buildargs=instructions.arguments,
target=instructions.build_stage,
decode=True,
rm=True)
log = [] # type: List[str]
for line in response:
if 'stream' in line:
line_msg = line['stream'].rstrip()
log.append(line_msg)
if not quiet:
print(line_msg)
if line_msg.startswith('Successfully built'):
success = True
if not success:
raise ImageBuildFailed(name, log)
if success and not quiet:
logger.info("built image: %s", name)
return
finally:
if os.path.exists(tf):
os.remove(tf)
def uninstall(self,
name: str,
force: bool = False,
noprune: bool = False
) -> None:
"""
Attempts to uninstall a given Docker image.
Parameters:
name: the name of the Docker image.
force: a flag indicating whether or not an exception should be
thrown if the image associated with the given build
instructions is not installed. If `True`, no exception
                will be thrown; if `False`, an exception will be thrown.
noprune: a flag indicating whether or not dangling image layers
should also be removed.
Raises:
docker.errors.ImageNotFound: if the image associated with the given
instructions can't be found.
"""
try:
self.__docker.images.remove(image=name,
force=force,
noprune=noprune)
except docker.errors.ImageNotFound as e:
if force:
return
raise e
def download(self,
name: str,
force: bool = False
) -> bool:
"""
Attempts to download a given Docker image. If `force=True`, then any
previously installed version of the image (described by the
instructions) will be replaced by the image on DockerHub.
Parameters:
name: the name of the Docker image.
Returns:
`True` if successfully downloaded, otherwise `False`.
"""
try:
self.__docker.images.pull(name)
return True
except docker.errors.NotFound:
print("Failed to locate image on DockerHub: {}".format(name))
return False
def upload(self, name: str) -> bool:
"""
Attempts to upload a given Docker image from this server to DockerHub.
Parameters:
name: the name of the Docker image.
Returns:
`True` if successfully uploaded, otherwise `False`.
"""
try:
out = self.__docker.images.push(name, stream=True)
for line in out:
line = line.strip().decode('utf-8')
jsn = json.loads(line)
if 'progress' in jsn:
line = "{}. {}.".format(jsn['status'], jsn['progress'])
print(line, end='\r')
elif 'status' in jsn:
print(jsn['status'])
print('uploaded image to DockerHub: {}'.format(name))
return True
except docker.errors.NotFound:
print("Failed to push image ({}): not installed.".format(name))
return False
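# Illustrative usage sketch (not part of the original module; assumes a reachable
# Docker daemon and a BuildInstructions object `blueprint` created elsewhere):
#
#   import docker
#   manager = BuildManager(docker.from_env())
#   manager.register(blueprint)
#   manager.build(blueprint.name, quiet=True)
#   assert manager.is_installed(blueprint.name)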
| 34.09633 | 80 | 0.532625 |
7941c216a70c50b88b18f1bb6ce1feda73ebb1d6 | 1,008 | py | Python | batch/test/failure_injecting_client_session.py | tdeboer-ilmn/hail | 98fffc9b4e13cd5d5ced8322112894361d0b7052 | [
"MIT"
] | 789 | 2016-09-05T04:14:25.000Z | 2022-03-30T09:51:54.000Z | batch/test/failure_injecting_client_session.py | tdeboer-ilmn/hail | 98fffc9b4e13cd5d5ced8322112894361d0b7052 | [
"MIT"
] | 5,724 | 2016-08-29T18:58:40.000Z | 2022-03-31T23:49:42.000Z | batch/test/failure_injecting_client_session.py | tdeboer-ilmn/hail | 98fffc9b4e13cd5d5ced8322112894361d0b7052 | [
"MIT"
] | 233 | 2016-08-31T20:42:38.000Z | 2022-02-17T16:42:39.000Z | import aiohttp
from hailtop.utils import async_to_blocking
from hailtop.httpx import client_session
class FailureInjectingClientSession:
def __init__(self, should_fail):
self.should_fail = should_fail
self.real_session = client_session()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
async_to_blocking(self.real_session.close())
def maybe_fail(self, method, path, headers):
if self.should_fail():
raise aiohttp.ClientResponseError(
status=503,
message='Service Unavailable from FailureInjectingClientSession',
request_info=aiohttp.RequestInfo(url=path, method=method, headers=headers, real_url=path),
history=(),
)
async def request(self, method, path, *args, **kwargs):
self.maybe_fail(method, path, kwargs.get('headers', {}))
return await self.real_session.request(method, path, *args, **kwargs)
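# Illustrative usage sketch (not part of the original test helper; assumes a test
# that wants every second request to fail with a 503):
#
#   import itertools
#   flags = itertools.cycle([True, False])
#   with FailureInjectingClientSession(lambda: next(flags)) as session:
#       ...  # hand `session` to the async code under test; failing calls raise a 503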
| 33.6 | 106 | 0.667659 |
7941c237d1074c4355efe59d4b15a4ebbbfcfecc | 959 | py | Python | selfdrive/camerad/test/frame_test.py | cheeseonhead/openpilot | 4262a15a5ac7999c029a1d33a825f12dbae27806 | [
"MIT"
] | 1 | 2021-09-15T14:42:15.000Z | 2021-09-15T14:42:15.000Z | selfdrive/camerad/test/frame_test.py | cheeseonhead/openpilot | 4262a15a5ac7999c029a1d33a825f12dbae27806 | [
"MIT"
] | null | null | null | selfdrive/camerad/test/frame_test.py | cheeseonhead/openpilot | 4262a15a5ac7999c029a1d33a825f12dbae27806 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import cereal.messaging as messaging
from PIL import ImageDraw, Image
# font = ImageFont.truetype("arial", size=72)
def get_frame(idx):
img = np.zeros((874, 1164, 3), np.uint8)
img[100:400, 100:100+(idx % 10) * 100] = 255
# big number
im2 = Image.new("RGB", (200, 200))
draw = ImageDraw.Draw(im2)
draw.text((10, 100), "%02d" % idx)
img[400:600, 400:600] = np.array(im2.getdata()).reshape((200, 200, 3))
  return img.tobytes()  # tostring() is a deprecated alias of tobytes(); same raw buffer
if __name__ == "__main__":
from common.realtime import Ratekeeper
rk = Ratekeeper(20)
pm = messaging.PubMaster(['roadCameraState'])
frm = [get_frame(x) for x in range(30)]
idx = 0
while 1:
print("send %d" % idx)
dat = messaging.new_message('roadCameraState')
dat.valid = True
dat.frame = {
"frameId": idx,
"image": frm[idx % len(frm)],
}
pm.send('roadCameraState', dat)
idx += 1
rk.keep_time()
#time.sleep(1.0)
| 25.236842 | 72 | 0.629823 |
7941c3e92f738c5e5cf1937fd8a1bc8ddd9e0218 | 2,928 | py | Python | pkpdapp/pkpdapp/migrations/0007_initial_units.py | pkpdapp-team/pkpdapp | 8a9748ef52199ffdaa9194d0e1c384b4a78a70dc | [
"BSD-3-Clause"
] | 4 | 2020-11-10T10:13:32.000Z | 2021-09-22T15:24:23.000Z | pkpdapp/pkpdapp/migrations/0007_initial_units.py | pkpdapp-team/pkpdapp | 8a9748ef52199ffdaa9194d0e1c384b4a78a70dc | [
"BSD-3-Clause"
] | 232 | 2020-05-06T09:32:10.000Z | 2022-03-28T16:31:29.000Z | pkpdapp/pkpdapp/migrations/0007_initial_units.py | pkpdapp-team/pkpdapp | 8a9748ef52199ffdaa9194d0e1c384b4a78a70dc | [
"BSD-3-Clause"
] | 1 | 2020-10-13T11:40:06.000Z | 2020-10-13T11:40:06.000Z | #
# This file is part of PKPDApp (https://github.com/pkpdapp-team/pkpdapp) which
# is released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
from django.db import migrations
import myokit
def load_units(apps, schema_editor):
Unit = apps.get_model("pkpdapp", "Unit")
m = myokit.Unit.parse_simple('m')
L = myokit.Unit.parse_simple('L')
cL = myokit.Unit.parse_simple('cL')
h = myokit.Unit.parse_simple('h')
g = myokit.Unit.parse_simple('g')
dimensionless = myokit.Unit()
units = [
{
'symbol': 'h',
'unit': h,
},
{
'symbol': 'mg',
'unit': 1e-3 * g,
},
{
'symbol': 'd',
'unit': 24 * h,
},
{
'symbol': '1/d',
'unit': 1 / (24 * h),
},
{
'symbol': '1/h',
'unit': 1 / h,
},
{
'symbol': 'L/mg/d',
'unit': L / (1e-3 * g * 24 * h),
},
{
'symbol': 'L',
'unit': L
},
{
'symbol': 'L/h',
'unit': L / h
},
{
'symbol': '1/L',
'unit': 1 / L
},
{
'symbol': 'cm^3',
'unit': (1e-2 * m)**3,
},
{
'symbol': 'cm^3/d',
'unit': (1e-2 * m)**3 / (24 * h),
},
{
'symbol': 'g',
'unit': g,
},
{
'symbol': 'ng',
'unit': 1e-9 * g,
},
{
'symbol': 'ng/mL',
'unit': 1e-9 * g / (1e-3 * L),
},
{
'symbol': 'mg/L',
'unit': 1e-3 * g / L,
},
{
'symbol': 'ng/L',
'unit': 1e-9 * g / L,
},
{
'symbol': 'g/L',
'unit': g / L,
},
{
'symbol': '10^6/mcL',
'unit': 1e6 / (1e-3 * cL),
},
{
'symbol': '10^3/mcL',
'unit': 1e3 / (1e-3 * cL),
},
{
'symbol': 'g/dL',
'unit': g / (10 * cL),
},
{
'symbol': '',
'unit': dimensionless,
},
]
for u in units:
Unit.objects.create(
symbol=u['symbol'],
g=u['unit'].exponents()[0],
m=u['unit'].exponents()[1],
s=u['unit'].exponents()[2],
A=u['unit'].exponents()[3],
K=u['unit'].exponents()[4],
cd=u['unit'].exponents()[5],
mol=u['unit'].exponents()[6],
multiplier=u['unit'].multiplier_log_10(),
)
class Migration(migrations.Migration):
dependencies = [
('pkpdapp', '0001_initial'),
]
operations = [
migrations.RunPython(load_units),
]
| 22.523077 | 78 | 0.362022 |
7941c3f7dfd3bc52c010981b3b2369faf4484baf | 3,501 | py | Python | ruspy/test/estimation_tests/test_ambiguity.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | null | null | null | ruspy/test/estimation_tests/test_ambiguity.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | null | null | null | ruspy/test/estimation_tests/test_ambiguity.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | null | null | null | """
This module contains tests for the data and estimation code of the ruspy project. The
settings for this tests is specified in resources/replication_test/init_replication.yml.
The test first reads the original data, then processes the data to a pandas DataFrame
suitable for the estimation process. After estimating all the relevant parameters,
they are compared to the results, from the paper. As this test runs the complete
data_reading, data processing and runs several times the NFXP it is the one with the
longest test time.
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.est_cost_params import create_state_matrix
from ruspy.estimation.est_cost_params import derivative_loglike_cost_params
from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.cost_functions import lin_cost
from ruspy.model_code.cost_functions import lin_cost_dev
TEST_FOLDER = TEST_RESOURCES_DIR + "replication_test/"
@pytest.fixture(scope="module")
def inputs():
out = {}
disc_fac = 0.9999
num_states = 90
scale = 1e-3
init_dict = {
"model_specifications": {
"discount_factor": disc_fac,
"number_states": num_states,
"maint_cost_func": "linear",
"cost_scale": scale,
},
"optimizer": {"approach": "NFXP", "algorithm": "scipy_L-BFGS-B",
"gradient": "No",
"params": pd.DataFrame(
data=[10, 1, 0],
columns=["value"],
index=["RC", "theta_11", "omega"]
),
"constraints": [{"loc": "omega", "type": "fixed"}]
},
}
df = pd.read_pickle(TEST_FOLDER + "group_4.pkl")
result_trans, result_fixp = estimate(init_dict, df)
out["trans_est"] = result_trans["x"]
out["params_est"] = result_fixp["x"]
out["trans_ll"] = result_trans["fun"]
out["cost_ll"] = result_fixp["fun"]
out["states"] = df.loc[(slice(None), slice(1, None)), "state"].to_numpy(int)
out["decisions"] = df.loc[(slice(None), slice(1, None)), "decision"].to_numpy(int)
out["disc_fac"] = disc_fac
out["num_states"] = num_states
out["scale"] = scale
out["status"] = result_fixp["status"]
out["params"] = init_dict["optimizer"]["params"]
return out
@pytest.fixture(scope="module")
def outputs():
out = {}
out["trans_base"] = np.loadtxt(TEST_FOLDER + "repl_test_trans.txt")
out["params_base"] = np.append(np.loadtxt(TEST_FOLDER +
"repl_params_linear.txt"), 0)
out["transition_count"] = np.loadtxt(TEST_FOLDER + "transition_count.txt")
out["trans_ll"] = 3140.570557
out["cost_ll"] = 163.584
return out
def test_repl_params(inputs, outputs):
# This is as precise as the paper gets
assert_array_almost_equal(inputs["params_est"], outputs["params_base"], decimal=2)
def test_repl_trans(inputs, outputs):
assert_array_almost_equal(inputs["trans_est"], outputs["trans_base"])
def test_trans_ll(inputs, outputs):
assert_allclose(inputs["trans_ll"], outputs["trans_ll"])
def test_cost_ll(inputs, outputs):
# This is as precise as the paper gets
assert_allclose(inputs["cost_ll"], outputs["cost_ll"], atol=1e-3) | 36.852632 | 88 | 0.674093 |
7941c4892a89d6aad54b39dd927244b33ac1017e | 1,623 | py | Python | examples/example_3d_simulate/example_3d_fit.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | examples/example_3d_simulate/example_3d_fit.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | examples/example_3d_simulate/example_3d_fit.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | from astropy import log
from gammapy.image.models import SkyGaussian2D
from gammapy.spectrum.models import PowerLaw
from gammapy.maps import WcsNDMap
from gammapy.cube import SkyModel, SkyModelMapFit
from example_3d_simulate import get_sky_model
def load_cubes():
npred_cube = WcsNDMap.read('npred.fits')
exposure_cube = WcsNDMap.read('exposure.fits')
return dict(counts=npred_cube, exposure=exposure_cube)
def get_fit_model():
spatial_model = SkyGaussian2D(
lon_0='0 deg',
lat_0='0 deg',
sigma='1 deg',
)
spectral_model = PowerLaw(
index=2,
amplitude='1e-11 cm-2 s-1 TeV-1',
reference='1 TeV',
)
model = SkyModel(
spatial_model=spatial_model,
spectral_model=spectral_model,
)
model.parameters.set_parameter_errors(
{'lon_0': '0.1 deg',
'lat_0': '0.1 deg',
'sigma': '0.1 deg',
'index': '0.1',
'amplitude': '1e-12 cm-2 s-1 TeV-1'
})
model.parameters['sigma'].parmin = 0
return model
def main():
log.setLevel('INFO')
log.info('Starting ...')
cubes = load_cubes()
log.info('Loaded cubes: {}'.format(cubes))
model = get_fit_model()
log.info('Loaded model: {}'.format(model))
fit = SkyModelMapFit(model=model.copy(), **cubes)
log.info('Created analysis: {}'.format(fit))
fit.fit()
log.info('Starting values\n{}'.format(model.parameters))
log.info('Best fit values\n{}'.format(fit.model.parameters))
log.info('True values\n{}'.format(get_sky_model().parameters))
if __name__ == '__main__':
main()
| 24.969231 | 66 | 0.635243 |
7941c4a3692a06b54c50f1757bf27f5423b2d2c4 | 1,431 | py | Python | tests/test_sources_bh.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 10 | 2020-03-26T01:08:10.000Z | 2021-12-04T13:02:10.000Z | tests/test_sources_bh.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 25 | 2020-06-08T14:52:28.000Z | 2022-03-08T02:30:54.000Z | tests/test_sources_bh.py | mirochaj/ares | b3335ad30435ee0d7f17d0110aa164a35f252d78 | [
"MIT"
] | 8 | 2020-03-24T14:11:25.000Z | 2021-11-06T06:32:59.000Z | """
test_sed_mcd.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Thu May 2 10:46:44 2013
Description: Plot a simple multi-color disk accretion spectrum.
"""
import ares
import numpy as np
def test():
rmax = 1e2
mass = 10.
fsc = 0.1
alpha = -1.5
Emin = 1e2
Emax = 1e4
simpl = \
{
'source_type': 'bh',
'source_mass': mass,
'source_rmax': rmax,
'source_sed': 'simpl',
'source_Emin': Emin,
'source_Emax': Emax,
'source_EminNorm': Emin,
'source_EmaxNorm': Emax,
'source_alpha': alpha,
'source_fsc': fsc,
'source_logN': 22.,
}
mcd = \
{
'source_type': 'bh',
'source_sed': 'mcd',
'source_mass': mass,
'source_rmax': rmax,
'source_Emin': Emin,
'source_Emax': Emax,
'source_EminNorm': Emin,
'source_EmaxNorm': Emax,
}
agn = \
{
'source_type': 'bh',
'source_sed': 'sazonov2004',
'source_Emin': Emin,
'source_Emax': Emax,
'source_EminNorm': Emin,
'source_EmaxNorm': Emax,
}
bh_mcd = ares.sources.BlackHole(init_tabs=False, **mcd)
bh_sim = ares.sources.BlackHole(init_tabs=False, **simpl)
bh_s04 = ares.sources.BlackHole(init_tabs=False, **agn)
Earr = np.logspace(2, 4, 100)
for src in [bh_mcd, bh_sim, bh_s04]:
sed = bh_mcd.Spectrum(Earr)
if __name__ == '__main__':
test()
| 19.60274 | 63 | 0.592593 |
7941c549a9c0c6e82156307a47e2467a3f65ed9b | 29,924 | py | Python | billforward/models/tiered_volume_pricing_component.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/models/tiered_volume_pricing_component.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/models/tiered_volume_pricing_component.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class TieredVolumePricingComponent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, created=None, changed_by=None, updated=None, type=None, version_id=None, crm_id=None, id=None, product_rate_plan_id=None, unit_of_measure_id=None, organization_id=None, name=None, public_name=None, description=None, charge_type=None, invoicing_type=None, charge_model=None, upgrade_mode=None, downgrade_mode=None, default_quantity=None, min_quantity=None, max_quantity=None, valid_from=None, valid_till=None, tiers=None, unit_of_measure=None):
"""
TieredVolumePricingComponent - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created': 'datetime',
'changed_by': 'str',
'updated': 'datetime',
'type': 'str',
'version_id': 'str',
'crm_id': 'str',
'id': 'str',
'product_rate_plan_id': 'str',
'unit_of_measure_id': 'str',
'organization_id': 'str',
'name': 'str',
'public_name': 'str',
'description': 'str',
'charge_type': 'str',
'invoicing_type': 'str',
'charge_model': 'str',
'upgrade_mode': 'str',
'downgrade_mode': 'str',
'default_quantity': 'int',
'min_quantity': 'int',
'max_quantity': 'int',
'valid_from': 'datetime',
'valid_till': 'datetime',
'tiers': 'list[PricingComponentTier]',
'unit_of_measure': 'UnitOfMeasure'
}
self.attribute_map = {
'created': 'created',
'changed_by': 'changedBy',
'updated': 'updated',
'type': '@type',
'version_id': 'versionID',
'crm_id': 'crmID',
'id': 'id',
'product_rate_plan_id': 'productRatePlanID',
'unit_of_measure_id': 'unitOfMeasureID',
'organization_id': 'organizationID',
'name': 'name',
'public_name': 'publicName',
'description': 'description',
'charge_type': 'chargeType',
'invoicing_type': 'invoicingType',
'charge_model': 'chargeModel',
'upgrade_mode': 'upgradeMode',
'downgrade_mode': 'downgradeMode',
'default_quantity': 'defaultQuantity',
'min_quantity': 'minQuantity',
'max_quantity': 'maxQuantity',
'valid_from': 'validFrom',
'valid_till': 'validTill',
'tiers': 'tiers',
'unit_of_measure': 'unitOfMeasure'
}
self._created = created
self._changed_by = changed_by
self._updated = updated
self._type = type
self._version_id = version_id
self._crm_id = crm_id
self._id = id
self._product_rate_plan_id = product_rate_plan_id
self._unit_of_measure_id = unit_of_measure_id
self._organization_id = organization_id
self._name = name
self._public_name = public_name
self._description = description
self._charge_type = charge_type
self._invoicing_type = invoicing_type
self._charge_model = charge_model
self._upgrade_mode = upgrade_mode
self._downgrade_mode = downgrade_mode
self._default_quantity = default_quantity
self._min_quantity = min_quantity
self._max_quantity = max_quantity
self._valid_from = valid_from
self._valid_till = valid_till
self._tiers = tiers
self._unit_of_measure = unit_of_measure
@property
def created(self):
"""
Gets the created of this TieredVolumePricingComponent.
{ \"description\" : \"The UTC DateTime when the object was created.\", \"verbs\":[] }
:return: The created of this TieredVolumePricingComponent.
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this TieredVolumePricingComponent.
{ \"description\" : \"The UTC DateTime when the object was created.\", \"verbs\":[] }
:param created: The created of this TieredVolumePricingComponent.
:type: datetime
"""
self._created = created
@property
def changed_by(self):
"""
Gets the changed_by of this TieredVolumePricingComponent.
{ \"description\" : \"ID of the user who last updated the entity.\", \"verbs\":[] }
:return: The changed_by of this TieredVolumePricingComponent.
:rtype: str
"""
return self._changed_by
@changed_by.setter
def changed_by(self, changed_by):
"""
Sets the changed_by of this TieredVolumePricingComponent.
{ \"description\" : \"ID of the user who last updated the entity.\", \"verbs\":[] }
:param changed_by: The changed_by of this TieredVolumePricingComponent.
:type: str
"""
self._changed_by = changed_by
@property
def updated(self):
"""
Gets the updated of this TieredVolumePricingComponent.
{ \"description\" : \"The UTC DateTime when the object was last updated.\", \"verbs\":[] }
:return: The updated of this TieredVolumePricingComponent.
:rtype: datetime
"""
return self._updated
@updated.setter
def updated(self, updated):
"""
Sets the updated of this TieredVolumePricingComponent.
{ \"description\" : \"The UTC DateTime when the object was last updated.\", \"verbs\":[] }
:param updated: The updated of this TieredVolumePricingComponent.
:type: datetime
"""
self._updated = updated
@property
def type(self):
"""
Gets the type of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"default\" : \"\", \"verbs\":[\"POST\",\"GET\"] }
:return: The type of this TieredVolumePricingComponent.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"default\" : \"\", \"verbs\":[\"POST\",\"GET\"] }
:param type: The type of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["tieredPricingComponent", "flatPricingComponent", "tieredVolumePricingComponent"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def version_id(self):
"""
Gets the version_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"GET\"] }
:return: The version_id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""
Sets the version_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"GET\"] }
:param version_id: The version_id of this TieredVolumePricingComponent.
:type: str
"""
self._version_id = version_id
@property
def crm_id(self):
"""
Gets the crm_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The crm_id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._crm_id
@crm_id.setter
def crm_id(self, crm_id):
"""
Sets the crm_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param crm_id: The crm_id of this TieredVolumePricingComponent.
:type: str
"""
self._crm_id = crm_id
@property
def id(self):
"""
Gets the id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"GET\"] } When associating a pricing component with a product rate plan, this ID should be used.
:return: The id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"GET\"] } When associating a pricing component with a product rate plan, this ID should be used.
:param id: The id of this TieredVolumePricingComponent.
:type: str
"""
self._id = id
@property
def product_rate_plan_id(self):
"""
Gets the product_rate_plan_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The product_rate_plan_id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._product_rate_plan_id
@product_rate_plan_id.setter
def product_rate_plan_id(self, product_rate_plan_id):
"""
Sets the product_rate_plan_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param product_rate_plan_id: The product_rate_plan_id of this TieredVolumePricingComponent.
:type: str
"""
self._product_rate_plan_id = product_rate_plan_id
@property
def unit_of_measure_id(self):
"""
Gets the unit_of_measure_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The unit_of_measure_id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._unit_of_measure_id
@unit_of_measure_id.setter
def unit_of_measure_id(self, unit_of_measure_id):
"""
Sets the unit_of_measure_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param unit_of_measure_id: The unit_of_measure_id of this TieredVolumePricingComponent.
:type: str
"""
self._unit_of_measure_id = unit_of_measure_id
@property
def organization_id(self):
"""
Gets the organization_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[] }
:return: The organization_id of this TieredVolumePricingComponent.
:rtype: str
"""
return self._organization_id
@organization_id.setter
def organization_id(self, organization_id):
"""
Sets the organization_id of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[] }
:param organization_id: The organization_id of this TieredVolumePricingComponent.
:type: str
"""
self._organization_id = organization_id
@property
def name(self):
"""
Gets the name of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The name of this TieredVolumePricingComponent.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param name: The name of this TieredVolumePricingComponent.
:type: str
"""
self._name = name
@property
def public_name(self):
"""
Gets the public_name of this TieredVolumePricingComponent.
{\"description\":\"A friendly non-unique name used to identify this pricing-component\",\"verbs\":[\"POST\",\"PUT\",\"GET\"]}
:return: The public_name of this TieredVolumePricingComponent.
:rtype: str
"""
return self._public_name
@public_name.setter
def public_name(self, public_name):
"""
Sets the public_name of this TieredVolumePricingComponent.
{\"description\":\"A friendly non-unique name used to identify this pricing-component\",\"verbs\":[\"POST\",\"PUT\",\"GET\"]}
:param public_name: The public_name of this TieredVolumePricingComponent.
:type: str
"""
self._public_name = public_name
@property
def description(self):
"""
Gets the description of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The description of this TieredVolumePricingComponent.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this TieredVolumePricingComponent.
{ \"description\" : \"\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param description: The description of this TieredVolumePricingComponent.
:type: str
"""
self._description = description
@property
def charge_type(self):
"""
Gets the charge_type of this TieredVolumePricingComponent.
{ \"description\" : \"The charge type of the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The charge_type of this TieredVolumePricingComponent.
:rtype: str
"""
return self._charge_type
@charge_type.setter
def charge_type(self, charge_type):
"""
Sets the charge_type of this TieredVolumePricingComponent.
{ \"description\" : \"The charge type of the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param charge_type: The charge_type of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["setup", "subscription", "arrears", "usage"]
if charge_type not in allowed_values:
raise ValueError(
"Invalid value for `charge_type` ({0}), must be one of {1}"
.format(charge_type, allowed_values)
)
self._charge_type = charge_type
@property
def invoicing_type(self):
"""
Gets the invoicing_type of this TieredVolumePricingComponent.
{ \"default\" : \"Aggregated\", \"description\" : \"For <span class=\\\"label label-default\\\">setup</span> pricing components <span class=\\\"label label-default\\\">Immediate</span> invoicing will result in an invoice being issued on subscription being set to the AwaitingPayment state, irrespective of the subscription start date. <span class=\\\"label label-default\\\">Aggregated</span> invoicing will add a charge to the first invoice of the subscription.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The invoicing_type of this TieredVolumePricingComponent.
:rtype: str
"""
return self._invoicing_type
@invoicing_type.setter
def invoicing_type(self, invoicing_type):
"""
Sets the invoicing_type of this TieredVolumePricingComponent.
{ \"default\" : \"Aggregated\", \"description\" : \"For <span class=\\\"label label-default\\\">setup</span> pricing components <span class=\\\"label label-default\\\">Immediate</span> invoicing will result in an invoice being issued on subscription being set to the AwaitingPayment state, irrespective of the subscription start date. <span class=\\\"label label-default\\\">Aggregated</span> invoicing will add a charge to the first invoice of the subscription.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param invoicing_type: The invoicing_type of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["Immediate", "Aggregated"]
if invoicing_type not in allowed_values:
raise ValueError(
"Invalid value for `invoicing_type` ({0}), must be one of {1}"
.format(invoicing_type, allowed_values)
)
self._invoicing_type = invoicing_type
@property
def charge_model(self):
"""
Gets the charge_model of this TieredVolumePricingComponent.
{ \"description\" : \"The charge model of the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The charge_model of this TieredVolumePricingComponent.
:rtype: str
"""
return self._charge_model
@charge_model.setter
def charge_model(self, charge_model):
"""
Sets the charge_model of this TieredVolumePricingComponent.
{ \"description\" : \"The charge model of the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param charge_model: The charge_model of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["flat", "tiered", "tiered_volume"]
if charge_model not in allowed_values:
raise ValueError(
"Invalid value for `charge_model` ({0}), must be one of {1}"
.format(charge_model, allowed_values)
)
self._charge_model = charge_model
@property
def upgrade_mode(self):
"""
Gets the upgrade_mode of this TieredVolumePricingComponent.
{\"default\":\"<span class=\\\"label label-default\\\">immediate</span>\",\"description\":\"Default behaviour when a value is upgraded using this pricing component, this behaviour can be overridden when changing the value.<br><span class=\\\"label label-default\\\">immediate</span> — Upgrade will apply at the time the request is made.<br><span class=\\\"label label-default\\\">delayed</span> — Upgrade will apply at the end of the current billing cycle.\",\"verbs\":[\"POST\",\"GET\"]}
:return: The upgrade_mode of this TieredVolumePricingComponent.
:rtype: str
"""
return self._upgrade_mode
@upgrade_mode.setter
def upgrade_mode(self, upgrade_mode):
"""
Sets the upgrade_mode of this TieredVolumePricingComponent.
{\"default\":\"<span class=\\\"label label-default\\\">immediate</span>\",\"description\":\"Default behaviour when a value is upgraded using this pricing component, this behaviour can be overridden when changing the value.<br><span class=\\\"label label-default\\\">immediate</span> — Upgrade will apply at the time the request is made.<br><span class=\\\"label label-default\\\">delayed</span> — Upgrade will apply at the end of the current billing cycle.\",\"verbs\":[\"POST\",\"GET\"]}
:param upgrade_mode: The upgrade_mode of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["immediate", "delayed"]
if upgrade_mode not in allowed_values:
raise ValueError(
"Invalid value for `upgrade_mode` ({0}), must be one of {1}"
.format(upgrade_mode, allowed_values)
)
self._upgrade_mode = upgrade_mode
@property
def downgrade_mode(self):
"""
Gets the downgrade_mode of this TieredVolumePricingComponent.
{\"default\":\"<span class=\\\"label label-default\\\">delayed</span>\",\"description\":\"Default behaviour when a value is downgraded using this pricing component, this behaviour can be overridden when changing the value.<br><span class=\\\"label label-default\\\">immediate</span> — Downgrade will apply at the time the request is made.<br><span class=\\\"label label-default\\\">delayed</span> — Downgrade will apply at the end of the current billing cycle.\",\"verbs\":[\"POST\",\"GET\"]}
:return: The downgrade_mode of this TieredVolumePricingComponent.
:rtype: str
"""
return self._downgrade_mode
@downgrade_mode.setter
def downgrade_mode(self, downgrade_mode):
"""
Sets the downgrade_mode of this TieredVolumePricingComponent.
{\"default\":\"<span class=\\\"label label-default\\\">delayed</span>\",\"description\":\"Default behaviour when a value is downgraded using this pricing component, this behaviour can be overridden when changing the value.<br><span class=\\\"label label-default\\\">immediate</span> — Downgrade will apply at the time the request is made.<br><span class=\\\"label label-default\\\">delayed</span> — Downgrade will apply at the end of the current billing cycle.\",\"verbs\":[\"POST\",\"GET\"]}
:param downgrade_mode: The downgrade_mode of this TieredVolumePricingComponent.
:type: str
"""
allowed_values = ["immediate", "delayed"]
if downgrade_mode not in allowed_values:
raise ValueError(
"Invalid value for `downgrade_mode` ({0}), must be one of {1}"
.format(downgrade_mode, allowed_values)
)
self._downgrade_mode = downgrade_mode
@property
def default_quantity(self):
"""
Gets the default_quantity of this TieredVolumePricingComponent.
{ \"description\" : \"The default quantity of the pricing-component. If no value is supplied on a subscription this value will be used. This is useful for setting an expected purchase level of for introducing new pricing components to existing subscriptions and not having to back-fill values\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The default_quantity of this TieredVolumePricingComponent.
:rtype: int
"""
return self._default_quantity
@default_quantity.setter
def default_quantity(self, default_quantity):
"""
Sets the default_quantity of this TieredVolumePricingComponent.
{ \"description\" : \"The default quantity of the pricing-component. If no value is supplied on a subscription this value will be used. This is useful for setting an expected purchase level of for introducing new pricing components to existing subscriptions and not having to back-fill values\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param default_quantity: The default_quantity of this TieredVolumePricingComponent.
:type: int
"""
self._default_quantity = default_quantity
@property
def min_quantity(self):
"""
Gets the min_quantity of this TieredVolumePricingComponent.
{ \"default\" : \"0\", \"description\" : \"The minimum quantity of the pricing-component.\", \"verbs\":[] }
:return: The min_quantity of this TieredVolumePricingComponent.
:rtype: int
"""
return self._min_quantity
@min_quantity.setter
def min_quantity(self, min_quantity):
"""
Sets the min_quantity of this TieredVolumePricingComponent.
{ \"default\" : \"0\", \"description\" : \"The minimum quantity of the pricing-component.\", \"verbs\":[] }
:param min_quantity: The min_quantity of this TieredVolumePricingComponent.
:type: int
"""
self._min_quantity = min_quantity
@property
def max_quantity(self):
"""
Gets the max_quantity of this TieredVolumePricingComponent.
{ \"description\" : \"The maximum quantity of the pricing-component.\", \"verbs\":[] }
:return: The max_quantity of this TieredVolumePricingComponent.
:rtype: int
"""
return self._max_quantity
@max_quantity.setter
def max_quantity(self, max_quantity):
"""
Sets the max_quantity of this TieredVolumePricingComponent.
{ \"description\" : \"The maximum quantity of the pricing-component.\", \"verbs\":[] }
:param max_quantity: The max_quantity of this TieredVolumePricingComponent.
:type: int
"""
self._max_quantity = max_quantity
@property
def valid_from(self):
"""
Gets the valid_from of this TieredVolumePricingComponent.
{ \"default\" : \"current time\", \"description\" : \"The DateTime specifying when the pricing-component is valid from.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The valid_from of this TieredVolumePricingComponent.
:rtype: datetime
"""
return self._valid_from
@valid_from.setter
def valid_from(self, valid_from):
"""
Sets the valid_from of this TieredVolumePricingComponent.
{ \"default\" : \"current time\", \"description\" : \"The DateTime specifying when the pricing-component is valid from.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param valid_from: The valid_from of this TieredVolumePricingComponent.
:type: datetime
"""
self._valid_from = valid_from
@property
def valid_till(self):
"""
Gets the valid_till of this TieredVolumePricingComponent.
{ \"default\" : \"null\", \"description\" : \"The UTC DateTime specifying when the pricing-component is valid till.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The valid_till of this TieredVolumePricingComponent.
:rtype: datetime
"""
return self._valid_till
@valid_till.setter
def valid_till(self, valid_till):
"""
Sets the valid_till of this TieredVolumePricingComponent.
{ \"default\" : \"null\", \"description\" : \"The UTC DateTime specifying when the pricing-component is valid till.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param valid_till: The valid_till of this TieredVolumePricingComponent.
:type: datetime
"""
self._valid_till = valid_till
@property
def tiers(self):
"""
Gets the tiers of this TieredVolumePricingComponent.
{ \"default\" : \"[]\", \"description\" : \"The pricing-component-tiers associated with the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The tiers of this TieredVolumePricingComponent.
:rtype: list[PricingComponentTier]
"""
return self._tiers
@tiers.setter
def tiers(self, tiers):
"""
Sets the tiers of this TieredVolumePricingComponent.
{ \"default\" : \"[]\", \"description\" : \"The pricing-component-tiers associated with the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param tiers: The tiers of this TieredVolumePricingComponent.
:type: list[PricingComponentTier]
"""
self._tiers = tiers
@property
def unit_of_measure(self):
"""
Gets the unit_of_measure of this TieredVolumePricingComponent.
{ \"description\" : \"The unit-of-measure associated with the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:return: The unit_of_measure of this TieredVolumePricingComponent.
:rtype: UnitOfMeasure
"""
return self._unit_of_measure
@unit_of_measure.setter
def unit_of_measure(self, unit_of_measure):
"""
Sets the unit_of_measure of this TieredVolumePricingComponent.
{ \"description\" : \"The unit-of-measure associated with the pricing-component.\", \"verbs\":[\"POST\",\"PUT\",\"GET\"] }
:param unit_of_measure: The unit_of_measure of this TieredVolumePricingComponent.
:type: UnitOfMeasure
"""
self._unit_of_measure = unit_of_measure
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
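# Illustrative construction sketch (not part of the generated module; the field
# values below are examples only):
#
#   component = TieredVolumePricingComponent(
#       name="api-calls",
#       charge_type="usage",
#       charge_model="tiered_volume",
#       default_quantity=0,
#   )
#   print(component.to_str())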
| 38.119745 | 513 | 0.613287 |
7941c693bebe870b3bc3f75f8815ad4bb4b775c8 | 4,786 | py | Python | calc.py | luk707/ScientificCalculator | 343a6f3f7e926bd354aebf4803450aee4d5e1daa | [
"MIT"
] | 1 | 2017-09-16T04:23:20.000Z | 2017-09-16T04:23:20.000Z | calc.py | luk707/ScientificCalculator | 343a6f3f7e926bd354aebf4803450aee4d5e1daa | [
"MIT"
] | null | null | null | calc.py | luk707/ScientificCalculator | 343a6f3f7e926bd354aebf4803450aee4d5e1daa | [
"MIT"
] | 2 | 2017-03-14T12:39:38.000Z | 2020-02-03T09:46:57.000Z | import re
import sys
import math
class Token:
def __init__(self, type, value):
self.type = type
self.value = value
ans = 0
functions = {
"ans": lambda p: ans,
"exit": lambda p: sys.exit(0),
"mean": lambda numbers: float(sum(numbers)) / max(len(numbers), 1),
"sum": lambda numbers: math.fsum(numbers)
}
tokenDefinitions = [
Token("number", r"\d+(\.\d+)?"),
Token("op_add", r"\+"),
Token("op_sub", r"\-"),
Token("op_mul", r"\*"),
Token("op_div", r"\/"),
Token("op_mod", r"\%"),
Token("function", r"[a-zA-Z]+"),
Token("paren_open", r"\("),
Token("paren_close", r"\)"),
Token("comma", r"\,"),
Token("whitespace", r"\s+")
]
grammar = [
[
"function",
"paren_open",
"expr",
"paren_close",
lambda tokens: [
tokens[0],
tokens[1],
Token("params", [tokens[2].value]),
tokens[3]
]
],
[
"number",
lambda tokens: [
Token("expr", float(tokens[0].value))
]
],
[
"expr",
"paren_open",
"expr",
"paren_close",
lambda tokens: [
Token("expr", tokens[0].value * tokens[2].value)
]
],
[
"paren_open",
"expr",
"paren_close",
lambda tokens: [
Token("expr", tokens[1].value)
]
],
[
"expr",
"op_div",
"expr",
lambda tokens: [
Token("expr", tokens[0].value / tokens[2].value)
]
],
[
"expr",
"op_mul",
"expr",
lambda tokens: [
Token("expr", tokens[0].value * tokens[2].value)
]
],
[
"expr",
"op_mod",
"expr",
lambda tokens: [
Token("expr", tokens[0].value % tokens[2].value)
]
],
[
"expr",
"op_add",
"expr",
lambda tokens: [
Token("expr", tokens[0].value + tokens[2].value)
]
],
[
"expr",
"op_sub",
"expr",
lambda tokens: [
Token("expr", tokens[0].value - tokens[2].value)
]
],
[
"expr",
"comma",
"expr",
lambda tokens: [
Token("params", [tokens[0].value, tokens[2].value])
]
],
[
"params",
"comma",
"expr",
lambda tokens: [
Token("params", tokens[0].value + [tokens[2].value])
]
],
[
"function",
"paren_open",
"paren_close",
lambda tokens: [
tokens[0],
tokens[1],
Token("params", []),
tokens[2]
]
],
[
"function",
"paren_open",
"params",
"paren_close",
lambda tokens: [
Token("expr", functions[tokens[0].value](tokens[2].value))
]
]
]
def lex (source):
pointer = 0
result = []
while pointer < len(source):
foundMatch = False
for token in tokenDefinitions:
match = re.search(token.value, source[pointer:])
if match and match.start() == 0:
if not token.type == "whitespace":
result.append(Token(token.type, match.group(0)))
pointer = pointer + match.end()
foundMatch = True
break
if not foundMatch:
print(source)
print(' ' * pointer + "^")
print("Unexpected character {0}".format(source[pointer]))
sys.exit(1)
return result
def parse (program):
while len(program) > 1:
for node in grammar:
pointerLen = len(node) - 2
pointer = 0
match = False
while pointer + pointerLen < len(program):
match = True
for i in range(0, pointerLen + 1):
if not program[pointer + i].type == node[i]:
match = False
if match:
newTokens = node[len(node) - 1](program[pointer: pointer + pointerLen + 1])
program = program[:pointer] + newTokens + program[pointer + pointerLen + 1:]
break
else:
pointer += 1
if match:
break
if program[0].type == "expr":
return float(program[0].value)
return False
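# Illustrative examples (grammar rules are tried in the listed order, so * and /
# reduce before + and -, and parenthesised sub-expressions reduce first):
#
#   parse(lex("(1 + 2) * 3"))    # -> 9.0
#   parse(lex("mean(1, 2, 3)"))  # -> 2.0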
if (len(sys.argv) > 1):
for i in range(1, len(sys.argv)):
program = lex(sys.argv[i])
ans = parse(program)
print(ans)
else:
while True:
        source = input("> ")
program = lex(source)
ans = parse(program)
print(ans)
| 23.009615 | 96 | 0.439616 |
7941c73ba594722a7d7028d097eabdbe0d26bf2e | 1,475 | py | Python | query_local_distro.py | jolivain/distro | 5d228805b91b4e2363703cb30185156645be433d | [
"Apache-2.0"
] | 30 | 2021-07-01T01:20:41.000Z | 2022-03-18T20:22:41.000Z | query_local_distro.py | jolivain/distro | 5d228805b91b4e2363703cb30185156645be433d | [
"Apache-2.0"
] | 64 | 2021-06-30T20:20:33.000Z | 2022-03-23T17:40:08.000Z | query_local_distro.py | jolivain/distro | 5d228805b91b4e2363703cb30185156645be433d | [
"Apache-2.0"
] | 10 | 2021-06-30T20:42:06.000Z | 2022-03-09T19:38:47.000Z | #!/usr/bin/env python
# Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pformat
import distro
def pprint(obj: object) -> None:
for line in pformat(obj).split("\n"):
print(4 * " " + line)
print("os_release_info:")
pprint(distro.os_release_info())
print("lsb_release_info:")
pprint(distro.lsb_release_info())
print("distro_release_info:")
pprint(distro.distro_release_info())
print(f"id: {distro.id()}")
print(f"name: {distro.name()}")
print(f"name_pretty: {distro.name(True)}")
print(f"version: {distro.version()}")
print(f"version_pretty: {distro.version(True)}")
print(f"like: {distro.like()}")
print(f"codename: {distro.codename()}")
print(f"linux_distribution_full: {distro.linux_distribution()}")
print(f"linux_distribution: {distro.linux_distribution(False)}")
print(f"major_version: {distro.major_version()}")
print(f"minor_version: {distro.minor_version()}")
print(f"build_number: {distro.build_number()}")
| 33.522727 | 74 | 0.742373 |
7941c7b37ba567f2ada60446a32f4137e8bbdc17 | 19,084 | py | Python | darts/datasets/__init__.py | bruecks2/darts | 4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5 | [
"Apache-2.0"
] | null | null | null | darts/datasets/__init__.py | bruecks2/darts | 4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5 | [
"Apache-2.0"
] | null | null | null | darts/datasets/__init__.py | bruecks2/darts | 4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5 | [
"Apache-2.0"
] | 1 | 2022-02-15T11:06:29.000Z | 2022-02-15T11:06:29.000Z | """
Datasets
--------
A few popular time series datasets
"""
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from darts import TimeSeries
from darts.logging import get_logger, raise_if_not
from .dataset_loaders import DatasetLoaderCSV, DatasetLoaderMetadata
"""
Overall usage of this package:
from darts.datasets import AirPassengersDataset
ts: TimeSeries = AirPassengersDataset().load()
"""
logger = get_logger(__name__)
_DEFAULT_PATH = "https://raw.githubusercontent.com/unit8co/darts/master/datasets"
class AirPassengersDataset(DatasetLoaderCSV):
"""
Monthly Air Passengers Dataset, from 1949 to 1960.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"air_passengers.csv",
uri=_DEFAULT_PATH + "/AirPassengers.csv",
hash="167ffa96204a2b47339c21eea25baf32",
header_time="Month",
)
)
class AusBeerDataset(DatasetLoaderCSV):
"""
Total quarterly beer production in Australia (in megalitres) from 1956:Q1 to 2008:Q3 [1]_.
References
----------
.. [1] https://rdrr.io/cran/fpp/man/ausbeer.html
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ausbeer.csv",
uri=_DEFAULT_PATH + "/ausbeer.csv",
hash="1f4028a570a20939411cc04de7364bbd",
header_time="date",
format_time="%Y-%m-%d",
)
)
class EnergyDataset(DatasetLoaderCSV):
"""
Hourly energy dataset coming from [1]_.
Contains a time series with 28 hourly components between 2014-12-31 23:00:00 and 2018-12-31 22:00:00
References
----------
.. [1] https://www.kaggle.com/nicholasjhana/energy-consumption-generation-prices-and-weather
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"energy.csv",
uri=_DEFAULT_PATH + "/energy_dataset.csv",
hash="f564ef18e01574734a0fa20806d1c7ee",
header_time="time",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class GasRateCO2Dataset(DatasetLoaderCSV):
"""
Gas Rate CO2 dataset
Two components, length 296 (integer time index)
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"gasrate_co2.csv",
uri=_DEFAULT_PATH + "/gasrate_co2.csv",
hash="77bf383715a9cf81459f81fe17baf3b0",
header_time=None,
format_time=None,
)
)
class HeartRateDataset(DatasetLoaderCSV):
"""
The series contains 1800 evenly-spaced measurements of instantaneous heart rate from a single subject.
The measurements (in units of beats per minute) occur at 0.5 second intervals, so that the length of
each series is exactly 15 minutes.
This is the series1 in [1]_.
It uses an integer time index.
References
----------
.. [1] http://ecg.mit.edu/time-series/
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"heart_rate.csv",
uri=_DEFAULT_PATH + "/heart_rate.csv",
hash="3c4a108e1116867cf056dc5be2c95386",
header_time=None,
format_time=None,
)
)
class IceCreamHeaterDataset(DatasetLoaderCSV):
"""
Monthly sales of heaters and ice cream between January 2004 and June 2020.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ice_cream_heater.csv",
uri=_DEFAULT_PATH + "/ice_cream_heater.csv",
hash="62031c7b5cdc9339fe7cf389173ef1c3",
header_time="Month",
format_time="%Y-%m",
)
)
class MonthlyMilkDataset(DatasetLoaderCSV):
"""
Monthly production of milk (in pounds per cow) between January 1962 and December 1975
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"monthly_milk.csv",
uri=_DEFAULT_PATH + "/monthly-milk.csv",
hash="4784443e696da45d7082e76a67687b93",
header_time="Month",
format_time="%Y-%m",
)
)
class MonthlyMilkIncompleteDataset(DatasetLoaderCSV):
"""
Monthly production of milk (in pounds per cow) between January 1962 and December 1975.
Has some missing values.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"monthly_milk-incomplete.csv",
uri=_DEFAULT_PATH + "/monthly-milk-incomplete.csv",
hash="49b275c7e2f8f28a6a05224be1a049a4",
header_time="Month",
format_time="%Y-%m",
freq="MS",
)
)
class SunspotsDataset(DatasetLoaderCSV):
"""
Monthly Sunspot Numbers, 1749 - 1983
Monthly mean relative sunspot numbers from 1749 to 1983.
Collected at Swiss Federal Observatory, Zurich until 1960, then Tokyo Astronomical Observatory.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/datasets/versions/3.6.1/topics/sunspots
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"sunspots.csv",
uri=_DEFAULT_PATH + "/monthly-sunspots.csv",
hash="4d27019c43d9c256d528f1bd6c5f40e0",
header_time="Month",
format_time="%Y-%m",
)
)
class TaylorDataset(DatasetLoaderCSV):
"""
Half-hourly electricity demand in England and Wales from Monday 5 June 2000 to Sunday 27 August 2000.
Discussed in Taylor (2003) [1]_, and kindly provided by James W Taylor [2]_. Units: Megawatts
(Uses an integer time index).
References
----------
.. [1] Taylor, J.W. (2003) Short-term electricity demand forecasting using double seasonal exponential smoothing.
Journal of the Operational Research Society, 54, 799-805.
.. [2] https://www.rdocumentation.org/packages/forecast/versions/8.13/topics/taylor
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"taylor.csv",
uri=_DEFAULT_PATH + "/taylor.csv",
hash="1ea355c90e8214cb177788a674801a22",
header_time=None,
format_time=None,
)
)
class TemperatureDataset(DatasetLoaderCSV):
"""
Daily temperature in Melbourne between 1981 and 1990
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"temperatures.csv",
uri=_DEFAULT_PATH + "/temps.csv",
hash="ce5b5e4929793ec8b6a54711110acebf",
header_time="Date",
format_time="%m/%d/%Y",
freq="D",
)
)
class USGasolineDataset(DatasetLoaderCSV):
"""
Weekly U.S. Product Supplied of Finished Motor Gasoline between 1991-02-08 and 2021-04-30
Obtained from [1]_.
References
----------
.. [1] https://www.eia.gov/dnav/pet/hist/LeafHandler.ashx?n=PET&s=wgfupus2&f=W
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"us_gasoline.csv",
uri=_DEFAULT_PATH + "/us_gasoline.csv",
hash="25d440337a06cbf83423e81d0337a1ce",
header_time="Week",
format_time="%m/%d/%Y",
)
)
class WineDataset(DatasetLoaderCSV):
"""
Australian total wine sales by wine makers in bottles <= 1 litre. Monthly between Jan 1980 and Aug 1994.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"wine.csv",
uri=_DEFAULT_PATH + "/wineind.csv",
hash="b68971d7e709ad0b7e6300cab977e3cd",
header_time="date",
format_time="%Y-%m-%d",
)
)
class WoolyDataset(DatasetLoaderCSV):
"""
Quarterly production of woollen yarn in Australia: tonnes. Mar 1965 -- Sep 1994.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/woolyrnq
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"wooly.csv",
uri=_DEFAULT_PATH + "/woolyrnq.csv",
hash="4be8b12314db94c8fd76f5c674454bf0",
header_time="date",
format_time="%Y-%m-%d",
)
)
class ETTh1Dataset(DatasetLoaderCSV):
"""
    Data from one electricity transformer at one station, including load and oil temperature.
    The dataset ranges from 2016/07 to 2018/07, sampled hourly.
    Source: [1]_, [2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
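A minimal usage sketch (hypothetical import path; assumes this loader is exposed as
darts.datasets.ETTh1Dataset and that the returned TimeSeries supports selection by
component name and drop_columns):
    from darts.datasets import ETTh1Dataset
    series = ETTh1Dataset().load()
    target = series["OT"]                     # oil temperature, the forecasting target
    covariates = series.drop_columns(["OT"])  # the six load measurements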
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTh1.csv",
uri=_DEFAULT_PATH + "/ETTh1.csv",
hash="8381763947c85f4be6ac456c508460d6",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTh2Dataset(DatasetLoaderCSV):
"""
Data from one electricity transformer at one station, including load and oil temperature.
The dataset ranges from 2016/07 to 2018/07 taken hourly.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTh2.csv",
uri=_DEFAULT_PATH + "/ETTh2.csv",
hash="51a229a3fc13579dd939364fefe9c7ab",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTm1Dataset(DatasetLoaderCSV):
"""
Data from one electricity transformer at one station, including load and oil temperature.
The dataset ranges from 2016/07 to 2018/07 recorded every 15 minutes.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTm1.csv",
uri=_DEFAULT_PATH + "/ETTm1.csv",
hash="82d6bd89109c63d075d99c1077b33f38",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTm2Dataset(DatasetLoaderCSV):
"""
Data from one electricity transformer at one station, including load and oil temperature.
The dataset ranges from 2016/07 to 2018/07 recorded every 15 minutes.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTm2.csv",
uri=_DEFAULT_PATH + "/ETTm2.csv",
hash="7687e47825335860bf58bccb31be0c56",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ElectricityDataset(DatasetLoaderCSV):
"""
Measurements of electric power consumption with a 15 minute sampling rate.
The consumption of 370 clients is recorded in kW.
Source: [1]_
Loading this dataset will provide a multivariate timeseries with 370 columns, one for each household.
The following code can be used to convert the dataset to a list of univariate timeseries,
one for each household.
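A minimal sketch of that conversion (hypothetical import path; assumes this loader is
exposed as darts.datasets.ElectricityDataset and that load() honours the multivariate
flag documented below):
    from darts.datasets import ElectricityDataset
    # let the loader perform the split ...
    series_list = ElectricityDataset(multivariate=False).load()
    # ... or load the multivariate series and split it manually
    multi_series = ElectricityDataset().load()
    series_list = [multi_series.univariate_component(i) for i in range(multi_series.width)]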
References
----------
.. [1] https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014
"""
def __init__(self, multivariate: bool = True):
"""
Parameters
----------
multivariate: bool
Whether to return a single multivariate timeseries - if False returns a list of univariate TimeSeries. Default is True.
"""
def pre_proces_fn(extracted_dir, dataset_path):
with open(Path(extracted_dir, "LD2011_2014.txt")) as fin:
with open(dataset_path, "wt", newline="\n") as fout:
for line in fin:
fout.write(line.replace(",", ".").replace(";", ","))
super().__init__(
metadata=DatasetLoaderMetadata(
"Electricity.csv",
uri="https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip",
hash="acfe6783eea43905e510f537add940fd",
header_time="Unnamed: 0",
format_time="%Y-%m-%d %H:%M:%S",
pre_process_zipped_csv_fn=pre_proces_fn,
multivariate=multivariate,
)
)
def _to_multi_series(self, series: pd.DataFrame) -> List[TimeSeries]:
"""
Load the electricity dataset as a list of univariate series, one for each household.
"""
ts_list = [] # list of timeseries
for label in series:
srs = series[label]
# filter column down to the period of recording
srs = srs.replace(0.0, np.nan)
start_date = min(srs.fillna(method="ffill").dropna().index)
end_date = max(srs.fillna(method="bfill").dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range].fillna(0.0)
# convert to timeseries
tmp = pd.DataFrame({"power_usage": srs})
tmp["date"] = tmp.index
ts = TimeSeries.from_dataframe(tmp, "date", ["power_usage"])
ts_list.append(ts)
return ts_list
class UberTLCDataset(DatasetLoaderCSV):
"""
14.3 million Uber pickups from January to June 2015. The data is resampled to an hourly or daily frequency,
depending on sample_freq, using the locationID as the target.
Source: [1]_
Loading this dataset will provide a multivariate timeseries with 262 columns, one for each locationID.
The following code can be used to convert the dataset to a list of univariate timeseries,
one for each locationID.
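A minimal sketch of that conversion (same assumptions as for ElectricityDataset above;
sample_freq and multivariate are the constructor parameters documented below):
    from darts.datasets import UberTLCDataset
    # one univariate series of daily pickup counts per locationID
    series_list = UberTLCDataset(sample_freq="daily", multivariate=False).load()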
References
----------
.. [1] https://github.com/fivethirtyeight/uber-tlc-foil-response
"""
def __init__(self, sample_freq: str = "hourly", multivariate: bool = True):
"""
Parameters
----------
sample_freq: str
The sampling frequency of the data. Can be "hourly" or "daily". Default is "hourly".
multivariate: bool
Whether to return a single multivariate timeseries - if False returns a list of univariate TimeSeries. Default is True.
"""
valid_sample_freq = ["daily", "hourly"]
raise_if_not(
sample_freq in valid_sample_freq,
f"sample_freq must be one of {valid_sample_freq}",
logger,
)
def pre_proces_fn(extracted_dir, dataset_path):
df = pd.read_csv(
Path(extracted_dir, "uber-raw-data-janjune-15.csv"),
header=0,
usecols=["Pickup_date", "locationID"],
index_col=0,
)
output_dict = {}
freq_setting = "1H" if "hourly" in str(dataset_path) else "1D"
time_series_of_locations = list(df.groupby(by="locationID"))
for locationID, df in time_series_of_locations:
df = df.sort_index()  # sort_index returns a new frame, so assign it back
df.index = pd.to_datetime(df.index)
count_series = df.resample(rule=freq_setting).size()
output_dict[locationID] = count_series
output_df = pd.DataFrame(output_dict)
output_df.to_csv(dataset_path, line_terminator="\n")
super().__init__(
metadata=DatasetLoaderMetadata(
f"uber_tlc_{sample_freq}.csv",
uri="https://github.com/fivethirtyeight/uber-tlc-foil-response/raw/"
"63bb878b76f47f69b4527d50af57aac26dead983/"
"uber-trip-data/uber-raw-data-janjune-15.csv.zip",
hash="9ed84ebe0df4bc664748724b633b3fe6"
if sample_freq == "hourly"
else "24f9fd67e4b9e53f0214a90268cd9bee",
header_time="Pickup_date",
format_time="%Y-%m-%d %H:%M",
pre_process_zipped_csv_fn=pre_proces_fn,
multivariate=multivariate,
)
)
def _to_multi_series(self, series: pd.DataFrame) -> List[TimeSeries]:
"""
Load the Uber TLC dataset as a list of univariate timeseries, one for each locationID.
"""
ts_list = [] # list of timeseries
for label in series:
srs = series[label]
# filter column down to the period of recording
start_date = min(srs.fillna(method="ffill").dropna().index)
end_date = max(srs.fillna(method="bfill").dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range]
# convert to timeseries
tmp = pd.DataFrame({"locationID": srs})
tmp["date"] = tmp.index
ts = TimeSeries.from_dataframe(tmp, "date", ["locationID"])
ts_list.append(ts)
return ts_list
| 30.930308 | 131 | 0.586774 |
7941c9d2c59958b154a9fa01b4093b6872a1a085 | 1,370 | py | Python | examples/example.py | jkpubsrc/python-module-jk-trioping | a8a82530d1fab6056f3cbecf1c888cb39540c4a5 | ["Apache-1.1"] | null | null | null | examples/example.py | jkpubsrc/python-module-jk-trioping | a8a82530d1fab6056f3cbecf1c888cb39540c4a5 | ["Apache-1.1"] | null | null | null | examples/example.py | jkpubsrc/python-module-jk-trioping | a8a82530d1fab6056f3cbecf1c888cb39540c4a5 | ["Apache-1.1"] | null | null | null |
#!/usr/bin/python3
import trio
import jk_trioping
async def main():
# single ping with host not answering
print("Pinging:", "7.7.7.7", "1 time")
t = trio.current_time()
duration = await jk_trioping.ping("7.7.7.7")
dt = trio.current_time() - t
print("Duration (1 ping):", duration, "ms")
print("Total time:", 1000*dt, "ms")
print()
# single ping with host answering
print("Pinging:", "8.8.8.8", "1 time")
t = trio.current_time()
duration = await jk_trioping.ping("8.8.8.8")
dt = trio.current_time() - t
print("Duration (1 ping):", duration, "ms")
print("Total time:", 1000*dt, "ms")
print()
# multiple pings with host answering
print("Pinging:", "8.8.8.8", "5 times")
t = trio.current_time()
duration = await jk_trioping.ping("8.8.8.8", repeats=5)
dt = trio.current_time() - t
print("Duration (5 pings):", duration, "ms")
print("Total time:", 1000*dt, "ms")
print("Avg time spent per ping:", 1000*dt/3, "ms")
print()
# single pings with hosts answering
print("Pinging:", ["8.8.8.8", "9.9.9.9", "google.com", "yahoo.com"], "1 time")
t = trio.current_time()
durations = await jk_trioping.multiPing(["8.8.8.8", "9.9.9.9", "google.com", "yahoo.com"])
dt = trio.current_time() - t
print("Durations (4 pings):", durations)
print("Total time:", 1000*dt, "ms")
print("Avg time spent per ping:", 1000*dt/4, "ms")
#
trio.run(main)
| 24.035088 | 91 | 0.638686 |
7941c9eff35a95930bee97c7b7356aa9f69a069a | 4,804 | py | Python | main.py | grebtsew/ARNumpad | 2956cf6c72faf64c236d153fe1274d5ed626e83a | ["MIT"] | 1 | 2022-02-13T13:14:17.000Z | 2022-02-13T13:14:17.000Z | main.py | grebtsew/ARNumpad | 2956cf6c72faf64c236d153fe1274d5ed626e83a | ["MIT"] | null | null | null | main.py | grebtsew/ARNumpad | 2956cf6c72faf64c236d153fe1274d5ed626e83a | ["MIT"] | 3 | 2021-10-16T23:03:04.000Z | 2022-02-13T13:14:20.000Z |
# MediaPipe Hands
#
# Sources:
# https://google.github.io/mediapipe/solutions/hands.html // original code
# https://gist.github.com/TheJLifeX/74958cc59db477a91837244ff598ef4a // gesture detection
# https://github.com/Kazuhito00/mediapipe-python-sample // performance
# https://github.com/JuliaPoo/MultiHand-Tracking // left or right hand? Palm detection? 3d detection?
# https://towardsdatascience.com/handtrackjs-677c29c1d585 // more information, samples for .js
# Imported main
import cv2
import mediapipe as mp
from google.protobuf.json_format import MessageToDict # Convert google datatypes
# Own library
import calibrate as ca
import shared_variables as sv
import calculations as calc
import camera as cam
if __name__ == "__main__":
# Select camera to use
camera = 0
# Setup parameters
# Calculate appropriate detection confidence for camera used!
# Protocol 1. show hand press enter, 2. show back of hand press enter.
calibrated_detection_confidence = ca.calibrate(camera)
# Setup shared variables between threads
shared = sv.shared_variables()
# initiate detection
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
min_detection_confidence=calibrated_detection_confidence[1],
min_tracking_confidence=0.99, # Nice visuals!
max_num_hands=1 # default, Remove possibility of "hands in hands"
)
# Start image read thread
cam.camera_handler(camera,shared).start()
while shared.running:
while shared.image is not None:
# Perform hand detection
image = shared.image
image.flags.writeable = False
results = hands.process(image)
image.flags.writeable = True
# Collect result from mediapipe, into format usable by default python
if (results.multi_handedness is not None):
for idx, hand_handedness in enumerate(results.multi_handedness):
shared.handedness = MessageToDict(hand_handedness)
else:
shared.handedness = None
if(results.multi_hand_landmarks is not None):
shared.landmarks = []
for idx, multi_hand_landmarks in enumerate(results.multi_hand_landmarks):
shared.landmarks.append(MessageToDict(multi_hand_landmarks))
else:
shared.landmarks = None
#
# Visualizations
#
default_image = image.copy()
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
default_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
hand_image = image.copy()
hand_image = calc.show_hand_and_score(hand_image, shared)
scaling_filter_image = image.copy()
scaling_filter_image = calc.calculate_and_draw_center_of_hand(scaling_filter_image, shared)
scaling_filter_image = calc.calculate_hand_size_and_draw_boxes(scaling_filter_image, shared)
directional_image = image.copy()
directional_image = calc.calculate_and_show_direction(directional_image, shared)
openClose_image = image.copy()
openClose_image = calc.calculate_show_is_open(openClose_image, shared)
all_image = image.copy()
all_image = calc.calculate_and_draw_center_of_hand(all_image, shared)
all_image = calc.calculate_hand_size_and_draw_boxes(all_image, shared)
all_image = calc.calculate_and_show_direction(all_image, shared)
all_image = calc.show_hand_and_score(all_image, shared)
all_image = calc.calculate_show_is_open(all_image,shared)
all_image = calc.calculate_and_show_numpad(all_image, shared, show_thumb_range=True)
all_image = calc.detect_clicked_button_show_click(all_image, shared)
result_image = image.copy()
result_image = calc.calculate_and_show_numpad(result_image, shared, hide_if_open=True, min_hand_size_limit=50000) # hide numpad if hand is close or hand-size too small!
result_image = calc.detect_clicked_button_show_click(result_image, shared)
# Place all images in one for visualization!
all_in_one_image1 = calc.add_image_vertical(image, default_image)
all_in_one_image1 = calc.add_image_vertical(all_in_one_image1, hand_image)
all_in_one_image1 = calc.add_image_vertical(all_in_one_image1, scaling_filter_image)
all_in_one_image2 = calc.add_image_vertical(directional_image, openClose_image)
all_in_one_image2 = calc.add_image_vertical(all_in_one_image2, all_image)
all_in_one_image2 = calc.add_image_vertical(all_in_one_image2, result_image)
all_in_one_image = calc.add_image_horizontal(all_in_one_image1, all_in_one_image2)
cv2.imshow('All in one!', all_in_one_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
shared.running = False
break
hands.close()
| 39.056911 | 174 | 0.743963 |