# File: /script6.py (repo: wtyhome/Python-Learning_Examples, no license)
def f(n):
    if n <= 2:
        return 1
    else:
        return f(n - 1) + f(n - 2)

while True:
    inp = input("Please input an integer: ")
    print(f(int(inp)))
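# The recursive f() above recomputes subproblems and runs in exponential time.
# A minimal memoized variant for reference (the name `fib` is illustrative and
# not part of the original script; functools is standard library):
import functools

@functools.lru_cache(maxsize=None)
def fib(n):
    # Same base case as f(): fib(1) == fib(2) == 1.
    return 1 if n <= 2 else fib(n - 1) + fib(n - 2)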
#! /usr/bin/python3
# File: /rpcClient.py (repo: iaalm/ERNN, no license)
import os
import json
import argparse
import xmlrpc.client
parser = argparse.ArgumentParser()
parser.add_argument("--url",
default='http://10.60.150.247:8080',
help="model dirs")
parser.add_argument('--gpuid', default=0, help="gpuid")
args = parser.parse_args()
def rpcWorker(url, gpuid):
    """Pull candidate cells from the RPC server, train each one on the given
    GPU via train.lua, and report the best validation scores back with
    s.fight()."""
    s = xmlrpc.client.ServerProxy(url)
if os.path.exists('work_%d' % gpuid):
os.system('rm -rf work_%d' % gpuid)
os.system('cp -lr neuraltalk2-ERNN work_%d' % gpuid)
os.chdir('work_%d' % gpuid)
os.system('rm coco_caption/lock')
os.system('rm coco_caption/*.json')
flag = True
while(flag):
try:
ret = s.born()
        except Exception:
            # Server unreachable or busy; keep polling. (A bare except here
            # would also swallow KeyboardInterrupt and defeat the handler
            # further below.)
            continue
try:
val_max = {'CIDEr': -0.01}
nid = ret['id']
lua_code = ret['lua']
args = ret['args']
print('nid', nid)
with open('cell.lua', 'w') as fd:
fd.write(lua_code)
os.system('rm model_.json')
cmd = 'CUDA_VISIBLE_DEVICES=%d th train.lua ' % gpuid
cmd = cmd + ' '.join(['-%s %s' % (k, args[k]) for k in args])
os.system(cmd)
with open('model_.json') as fd:
data = json.load(fd)
val_max = {}
val_data = data["val_lang_stats_history"]
val_data = sorted(val_data.items(), key=lambda t: int(t[0]))
max_result = -0.01
for k, v in val_data:
if v['CIDEr'] > max_result:
max_result = v['CIDEr']
for metric in v:
val_max[metric] = v[metric]
val_max['pos_max'] = k
except FileNotFoundError:
print('FileNotFoundError')
val_max = {'CIDEr': -0.01}
except KeyError:
print('KeyError')
val_max = {'CIDEr': -0.01}
except KeyboardInterrupt:
if flag:
print('return next loop')
flag = False
else:
print('return now')
break
finally:
s.fight(nid, val_max)
if __name__ == '__main__':
rpcWorker(args.url, int(args.gpuid))
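# Example invocation (the URL below is simply the argparse default defined
# above; adjust the host/port and GPU id for the actual server):
#   python3 rpcClient.py --url http://10.60.150.247:8080 --gpuid 1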
# File: /third_party/catapult/third_party/gsutil/gslib/tests/test_resumable_streaming.py
# (repo: metux/chromium-suckless; licenses: LGPL-2.0-or-later, GPL-1.0-or-later, MIT, Apache-2.0, BSD-3-Clause)
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for resumable streaming upload functions and classes."""
from __future__ import absolute_import
from hashlib import md5
import os
import pkgutil
from gslib.exception import CommandException
from gslib.hashing_helper import CalculateHashesFromContents
from gslib.hashing_helper import CalculateMd5FromContents
from gslib.resumable_streaming_upload import ResumableStreamingJsonUploadWrapper
import gslib.tests.testcase as testcase
from gslib.util import GetJsonResumableChunkSize
from gslib.util import TRANSFER_BUFFER_SIZE
_TEST_FILE = 'test.txt'
class TestResumableStreamingJsonUploadWrapper(testcase.GsUtilUnitTestCase):
"""Unit tests for the TestResumableStreamingJsonUploadWrapper class."""
_temp_test_file = None
_temp_test_file_contents = None
_temp_test_file_len = None
def _GetTestFile(self):
if not self._temp_test_file:
self._temp_test_file_contents = pkgutil.get_data(
'gslib', 'tests/test_data/%s' % _TEST_FILE)
self._temp_test_file = self.CreateTempFile(
file_name=_TEST_FILE, contents=self._temp_test_file_contents)
self._temp_test_file_len = len(self._temp_test_file_contents)
return self._temp_test_file
def testReadInChunks(self):
tmp_file = self._GetTestFile()
with open(tmp_file, 'rb') as stream:
wrapper = ResumableStreamingJsonUploadWrapper(
stream, TRANSFER_BUFFER_SIZE, test_small_buffer=True)
hash_dict = {'md5': md5()}
# CalculateHashesFromContents reads in chunks, but does not seek.
CalculateHashesFromContents(wrapper, hash_dict)
with open(tmp_file, 'rb') as stream:
actual = CalculateMd5FromContents(stream)
self.assertEqual(actual, hash_dict['md5'].hexdigest())
def testReadInChunksWithSeekToBeginning(self):
"""Reads one buffer, then seeks to 0 and reads chunks until the end."""
tmp_file = self._GetTestFile()
for initial_read in (TRANSFER_BUFFER_SIZE - 1,
TRANSFER_BUFFER_SIZE,
TRANSFER_BUFFER_SIZE + 1,
TRANSFER_BUFFER_SIZE * 2 - 1,
TRANSFER_BUFFER_SIZE * 2,
TRANSFER_BUFFER_SIZE * 2 + 1,
TRANSFER_BUFFER_SIZE * 3 - 1,
TRANSFER_BUFFER_SIZE * 3,
TRANSFER_BUFFER_SIZE * 3 + 1):
for buffer_size in (TRANSFER_BUFFER_SIZE - 1,
TRANSFER_BUFFER_SIZE,
TRANSFER_BUFFER_SIZE + 1,
self._temp_test_file_len - 1,
self._temp_test_file_len,
self._temp_test_file_len + 1):
# Can't seek to 0 if the buffer is too small, so we expect an
# exception.
expect_exception = buffer_size < self._temp_test_file_len
with open(tmp_file, 'rb') as stream:
wrapper = ResumableStreamingJsonUploadWrapper(
stream, buffer_size, test_small_buffer=True)
wrapper.read(initial_read)
# CalculateMd5FromContents seeks to 0, reads in chunks, then seeks
# to 0 again.
try:
hex_digest = CalculateMd5FromContents(wrapper)
if expect_exception:
self.fail('Did not get expected CommandException for '
'initial read size %s, buffer size %s' %
(initial_read, buffer_size))
except CommandException, e:
if not expect_exception:
self.fail('Got unexpected CommandException "%s" for '
'initial read size %s, buffer size %s' %
(str(e), initial_read, buffer_size))
if not expect_exception:
with open(tmp_file, 'rb') as stream:
actual = CalculateMd5FromContents(stream)
self.assertEqual(
actual, hex_digest,
'Digests not equal for initial read size %s, buffer size %s' %
(initial_read, buffer_size))
def _testSeekBack(self, initial_reads, buffer_size, seek_back_amount):
"""Tests reading then seeking backwards.
This function simulates an upload that is resumed after a connection break.
It reads one transfer buffer at a time until it reaches initial_position,
then seeks backwards (as if the server did not receive some of the bytes)
and reads to the end of the file, ensuring the data read after the seek
matches the original file.
Args:
initial_reads: List of integers containing read sizes to perform
before seek.
buffer_size: Maximum buffer size for the wrapper.
seek_back_amount: Number of bytes to seek backward.
Raises:
AssertionError on wrong data returned by the wrapper.
"""
tmp_file = self._GetTestFile()
initial_position = 0
for read_size in initial_reads:
initial_position += read_size
    self.assertGreaterEqual(
        buffer_size, seek_back_amount,
        'seek_back_amount must not exceed buffer size %s '
        '(but was actually: %s)' % (buffer_size, seek_back_amount))
self.assertLess(
initial_position, self._temp_test_file_len,
'initial_position must be less than test file size %s '
'(but was actually: %s)' % (self._temp_test_file_len, initial_position))
with open(tmp_file, 'rb') as stream:
wrapper = ResumableStreamingJsonUploadWrapper(
stream, buffer_size, test_small_buffer=True)
position = 0
for read_size in initial_reads:
data = wrapper.read(read_size)
self.assertEqual(
self._temp_test_file_contents[position:position + read_size],
data, 'Data from position %s to %s did not match file contents.' %
(position, position + read_size))
position += len(data)
wrapper.seek(initial_position - seek_back_amount)
self.assertEqual(wrapper.tell(),
initial_position - seek_back_amount)
data = wrapper.read()
self.assertEqual(
self._temp_test_file_len - (initial_position - seek_back_amount),
len(data),
'Unexpected data length with initial pos %s seek_back_amount %s. '
'Expected: %s, actual: %s.' %
(initial_position, seek_back_amount,
self._temp_test_file_len - (initial_position - seek_back_amount),
len(data)))
self.assertEqual(
self._temp_test_file_contents[-len(data):], data,
'Data from position %s to EOF did not match file contents.' %
position)
def testReadSeekAndReadToEOF(self):
"""Tests performing reads on the wrapper, seeking, then reading to EOF."""
for initial_reads in ([1],
[TRANSFER_BUFFER_SIZE - 1],
[TRANSFER_BUFFER_SIZE],
[TRANSFER_BUFFER_SIZE + 1],
[1, TRANSFER_BUFFER_SIZE - 1],
[1, TRANSFER_BUFFER_SIZE],
[1, TRANSFER_BUFFER_SIZE + 1],
[TRANSFER_BUFFER_SIZE - 1, 1],
[TRANSFER_BUFFER_SIZE, 1],
[TRANSFER_BUFFER_SIZE + 1, 1],
[TRANSFER_BUFFER_SIZE - 1, TRANSFER_BUFFER_SIZE - 1],
[TRANSFER_BUFFER_SIZE - 1, TRANSFER_BUFFER_SIZE],
[TRANSFER_BUFFER_SIZE - 1, TRANSFER_BUFFER_SIZE + 1],
[TRANSFER_BUFFER_SIZE, TRANSFER_BUFFER_SIZE - 1],
[TRANSFER_BUFFER_SIZE, TRANSFER_BUFFER_SIZE],
[TRANSFER_BUFFER_SIZE, TRANSFER_BUFFER_SIZE + 1],
[TRANSFER_BUFFER_SIZE + 1, TRANSFER_BUFFER_SIZE - 1],
[TRANSFER_BUFFER_SIZE + 1, TRANSFER_BUFFER_SIZE],
[TRANSFER_BUFFER_SIZE + 1, TRANSFER_BUFFER_SIZE + 1],
[TRANSFER_BUFFER_SIZE, TRANSFER_BUFFER_SIZE,
TRANSFER_BUFFER_SIZE]):
initial_position = 0
for read_size in initial_reads:
initial_position += read_size
for buffer_size in (initial_position,
initial_position + 1,
initial_position * 2 - 1,
initial_position * 2):
for seek_back_amount in (
min(TRANSFER_BUFFER_SIZE - 1, initial_position),
min(TRANSFER_BUFFER_SIZE, initial_position),
min(TRANSFER_BUFFER_SIZE + 1, initial_position),
min(TRANSFER_BUFFER_SIZE * 2 - 1, initial_position),
min(TRANSFER_BUFFER_SIZE * 2, initial_position),
min(TRANSFER_BUFFER_SIZE * 2 + 1, initial_position)):
self._testSeekBack(initial_reads, buffer_size, seek_back_amount)
def testBufferSizeLessThanChunkSize(self):
ResumableStreamingJsonUploadWrapper(None, GetJsonResumableChunkSize())
try:
ResumableStreamingJsonUploadWrapper(None, GetJsonResumableChunkSize() - 1)
self.fail('Did not get expected CommandException')
except CommandException, e:
self.assertIn('Buffer size must be >= JSON resumable upload', str(e))
def testSeekPartialBuffer(self):
"""Tests seeking back partially within the buffer."""
tmp_file = self._GetTestFile()
read_size = TRANSFER_BUFFER_SIZE
with open(tmp_file, 'rb') as stream:
wrapper = ResumableStreamingJsonUploadWrapper(
stream, TRANSFER_BUFFER_SIZE * 3, test_small_buffer=True)
position = 0
for _ in xrange(3):
data = wrapper.read(read_size)
self.assertEqual(
self._temp_test_file_contents[position:position + read_size],
data, 'Data from position %s to %s did not match file contents.' %
(position, position + read_size))
position += len(data)
data = wrapper.read(read_size / 2)
      # Buffer contents should now hold data from
      # read_size/2 through 7*read_size/2.
position = read_size / 2
wrapper.seek(position)
data = wrapper.read()
self.assertEqual(
self._temp_test_file_contents[-len(data):], data,
'Data from position %s to EOF did not match file contents.' %
position)
def testSeekEnd(self):
tmp_file = self._GetTestFile()
for buffer_size in (TRANSFER_BUFFER_SIZE - 1,
TRANSFER_BUFFER_SIZE,
TRANSFER_BUFFER_SIZE + 1):
for seek_back in (TRANSFER_BUFFER_SIZE - 1,
TRANSFER_BUFFER_SIZE,
TRANSFER_BUFFER_SIZE + 1):
expect_exception = seek_back > buffer_size
with open(tmp_file, 'rb') as stream:
wrapper = ResumableStreamingJsonUploadWrapper(
stream, buffer_size, test_small_buffer=True)
# Read to the end.
while wrapper.read(TRANSFER_BUFFER_SIZE):
pass
try:
wrapper.seek(seek_back, whence=os.SEEK_END)
if expect_exception:
self.fail('Did not get expected CommandException for '
'seek_back size %s, buffer size %s' %
(seek_back, buffer_size))
except CommandException, e:
if not expect_exception:
self.fail('Got unexpected CommandException "%s" for '
'seek_back size %s, buffer size %s' %
(str(e), seek_back, buffer_size))
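# For reference, a minimal sketch of the chunked-hashing pattern the tests
# above exercise through CalculateHashesFromContents/CalculateMd5FromContents.
# The helper name is hypothetical and not part of gsutil; md5 and
# TRANSFER_BUFFER_SIZE are imported at the top of this file.
def _md5_in_chunks_sketch(stream, chunk_size=TRANSFER_BUFFER_SIZE):
  digest = md5()
  for chunk in iter(lambda: stream.read(chunk_size), b''):
    digest.update(chunk)
  return digest.hexdigest()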
# File: /tornado_chat/urls.py (repo: SamuelMartens/tornado_chat, no license)
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = patterns('',
url(r'^messages/', include('privatemessages.urls')),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('tornado_chat.views',
url(r'^$','log_in_chat' ),
url(r'^registration/$', 'reg'),
)
urlpatterns+=staticfiles_urlpatterns()
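# Note: django.conf.urls.patterns() (used above) was deprecated in Django 1.8
# and removed in Django 1.10. A sketch of the equivalent plain-list form for
# the two string-named views (the views import is assumed to resolve within
# this project):
#   from tornado_chat import views
#   urlpatterns = [
#       url(r'^$', views.log_in_chat),
#       url(r'^registration/$', views.reg),
#   ] + staticfiles_urlpatterns()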
# File: /tools/autoRivet.py (repo: rituparna/glTools, MIT license)
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.component
import glTools.utils.mesh
import glTools.utils.stringUtils
import glTools.utils.surface
def autoRivet(createRivetTransform=True,suffix='rvt'):
	'''
	Rivet the first selected transform to the closest face of the second
	selected mesh, using a surface lofted from two edges of that face.
	Select the object to rivet, then the target mesh, before running.
	'''
# Get User Selection
sel = mc.ls(sl=1)
# Check Selection
if not len(sel) == 2:
raise Exception('Select object to rivet and then the target mesh!')
# Determine rivet object and mesh
rvtObj = sel[0]
mesh = sel[1]
prefix = glTools.utils.stringUtils.stripSuffix(rvtObj)
# Get rivet object position
pos = glTools.utils.base.getPosition(rvtObj)
pt = OpenMaya.MPoint(pos[0],pos[1],pos[2],1.0)
# Get closest face on mesh
faceId = glTools.utils.mesh.closestFace(mesh,pos)
# =========================
# - Determine Rivet Edges -
# =========================
edgeId1 = -1
edgeId2 = -1
# Create MItMeshEdge
edgeIter = glTools.utils.mesh.getMeshEdgeIter(mesh)
# Create edgeId MScriptUtil
edgeIdUtil = OpenMaya.MScriptUtil()
edgeIdUtil.createFromInt(0)
edgeIdPtr = edgeIdUtil.asIntPtr()
# Get face edges
faceEdges = glTools.utils.mesh.getFaceEdgeIndices(mesh,faceId)
# Get closest edge
maxDist = 9999.0
for edgeId in faceEdges:
edgeIter.setIndex(edgeId,edgeIdPtr)
edgePt = edgeIter.center(OpenMaya.MSpace.kWorld)
edgeDist = (edgePt - pt).length()
if edgeDist < maxDist:
edgeId1 = edgeId
maxDist = edgeDist
# Set current edge
edgeIter.setIndex(edgeId1,edgeIdPtr)
# Get opposing edge
faceEdges.remove(edgeId1)
for edgeId in faceEdges:
edgeId2 = edgeId
# Check edge connectivity
if not edgeIter.connectedToEdge(edgeId): break
# ========================
# - Create Utility Nodes -
# ========================
# Rivet Edge 1
edgeCrv1 = prefix+'_edge'+str(edgeId1)+'_rivet_curveFromMeshEdge'
if not mc.objExists(edgeCrv1):
edgeCrv1 = mc.createNode('curveFromMeshEdge',n=edgeCrv1)
mc.setAttr(edgeCrv1+'.edgeIndex[0]',edgeId1)
mc.connectAttr(mesh+'.worldMesh[0]',edgeCrv1+'.inputMesh',f=True)
# Rivet Edge 2
edgeCrv2 = prefix+'_edge'+str(edgeId2)+'_rivet_curveFromMeshEdge'
if not mc.objExists(edgeCrv2):
edgeCrv2 = mc.createNode('curveFromMeshEdge',n=edgeCrv2)
mc.setAttr(edgeCrv2+'.edgeIndex[0]',edgeId2)
mc.connectAttr(mesh+'.worldMesh[0]',edgeCrv2+'.inputMesh',f=True)
# Rivet Loft
rivetLoft = prefix+'_face'+str(faceId)+'_rivet_loft'
if not mc.objExists(rivetLoft):
rivetLoft = mc.createNode('loft',n=rivetLoft)
mc.connectAttr(edgeCrv1+'.outputCurve',rivetLoft+'.inputCurve[0]',f=True)
mc.connectAttr(edgeCrv2+'.outputCurve',rivetLoft+'.inputCurve[1]',f=True)
# Rivet Point on Surface Info
rivetPosi = prefix+'_face'+str(faceId)+'_rivet_pointOnSurfaceInfo'
rivetPosi = mc.createNode('pointOnSurfaceInfo',n=rivetPosi)
mc.connectAttr(rivetLoft+'.outputSurface',rivetPosi+'.inputSurface')
# ===========================
# - Get Rivet UV Parameter -
# ===========================
# Build Temp Surface
tmpSrfShape = mc.createNode('nurbsSurface')
tmpSrf = mc.listRelatives(tmpSrfShape,p=True,pa=True)[0]
mc.connectAttr(rivetLoft+'.outputSurface',tmpSrfShape+'.create',f=True)
# Get closest point on surface
uv = glTools.utils.surface.closestPoint(tmpSrf,pos)
# Set rivet parameter
mc.setAttr(rivetPosi+'.parameterU',uv[0])
mc.setAttr(rivetPosi+'.parameterV',uv[1])
# Delete Temp Surface
mc.delete(tmpSrf)
# ==========================
# - Attach Rivet Transform -
# ==========================
# Determine rivet transform
rvtTransform = rvtObj
if createRivetTransform: rvtTransform = mc.group(em=True,n=prefix+'_rvt')
# Connect rivet transform
mc.connectAttr(rivetPosi+'.position',rvtTransform+'.t',f=True)
# Parent to rivet transform
if createRivetTransform: mc.parent(rvtObj,rvtTransform)
# =================
# - Return Result -
# =================
return rvtTransform
def meshFaceConstraint(face='',transform='',orient=True,prefix=''):
	'''
	Constrain a transform to a mesh face using a pointOnPolyConstraint
	evaluated at the UV center of the face.
	'''
# ==========
# - Checks -
# ==========
if not prefix: prefix = 'meshFaceConstraint'
if not face:
faceList = mc.filterExpand(sm=34)
if not faceList: raise Exception('No mesh face specified for constraint!')
face = faceList[0]
if not transform:
transformList = mc.ls(sl=True,type='transform')
if not transformList: transformList = mc.spaceLocator(n=prefix+'_locator')
transform = transformList[0]
# ======================
# - Get Face UV Center -
# ======================
# Get Face Details
mesh = mc.ls(face,o=True)[0]
faceId = glTools.utils.component.index(face)
# Get Mesh Face Function Set
uArray = OpenMaya.MFloatArray()
vArray = OpenMaya.MFloatArray()
faceIdUtil = OpenMaya.MScriptUtil()
faceIdUtil.createFromInt(0)
faceIdPtr = faceIdUtil.asIntPtr()
faceIt = glTools.utils.mesh.getMeshFaceIter(mesh)
faceIt.setIndex(faceId,faceIdPtr)
# Get UV Center
uvSet = mc.polyUVSet(mesh,q=True,cuv=True)
faceIt.getUVs(uArray,vArray)
uArray = list(uArray)
vArray = list(vArray)
uvCount = len(uArray)
u = 0.0
v = 0.0
for i in range(uvCount):
u += (uArray[i] / uvCount)
v += (vArray[i] / uvCount)
# =====================
# - Create Constraint -
# =====================
r = mc.getAttr(transform+'.r')[0]
meshCon = mc.pointOnPolyConstraint(mesh,transform,n=prefix+'_pointOnPolyConstraint')[0]
wtAlias = mc.pointOnPolyConstraint(meshCon,q=True,wal=True)[0]
mc.setAttr(meshCon+'.'+wtAlias.replace('W0','U0'),u)
mc.setAttr(meshCon+'.'+wtAlias.replace('W0','V0'),v)
# Orient
if not orient:
rxConn = mc.listConnections(transform+'.rx',s=True,d=False,p=True)[0]
mc.disconnectAttr(rxConn,transform+'.rx')
ryConn = mc.listConnections(transform+'.ry',s=True,d=False,p=True)[0]
mc.disconnectAttr(ryConn,transform+'.ry')
rzConn = mc.listConnections(transform+'.rz',s=True,d=False,p=True)[0]
mc.disconnectAttr(rzConn,transform+'.rz')
mc.setAttr(transform+'.r',*r)
# =================
# - Return Result -
# =================
return meshCon
def meshVertexConstraint(vertex='',transform='',orient=True,prefix=''):
	'''
	Constrain a transform to a mesh vertex using a pointOnPolyConstraint
	evaluated at the vertex UV.
	'''
# ==========
# - Checks -
# ==========
if not prefix: prefix = 'meshVertexConstraint'
if not vertex:
vtxList = mc.filterExpand(sm=31)
if not vtxList: raise Exception('No mesh vertex specified for constraint!')
vertex = vtxList[0]
if not transform:
transformList = mc.ls(sl=True,type='transform')
if not transformList: transformList = mc.spaceLocator(n=prefix+'_locator')
transform = transformList[0]
# =================
# - Get Vertex UV -
# =================
# Get Vertex Details
mesh = mc.ls(vertex,o=True)[0]
vtxId = glTools.utils.component.index(vertex)
# Get Mesh Vertex Function Set
uArray = OpenMaya.MFloatArray()
vArray = OpenMaya.MFloatArray()
faceArray = OpenMaya.MIntArray()
vtxIdUtil = OpenMaya.MScriptUtil()
vtxIdUtil.createFromInt(0)
vtxIdPtr = vtxIdUtil.asIntPtr()
vtxIt = glTools.utils.mesh.getMeshVertexIter(mesh)
vtxIt.setIndex(vtxId,vtxIdPtr)
# Get UV Center
uvSet = mc.polyUVSet(mesh,q=True,cuv=True)
vtxIt.getUVs(uArray,vArray,faceArray)
uArray = list(uArray)
vArray = list(vArray)
u = uArray[0]
v = vArray[0]
# =====================
# - Create Constraint -
# =====================
r = mc.getAttr(transform+'.r')[0]
meshCon = mc.pointOnPolyConstraint(mesh,transform,n=prefix+'_pointOnPolyConstraint')[0]
wtAlias = mc.pointOnPolyConstraint(meshCon,q=True,wal=True)[0]
mc.setAttr(meshCon+'.'+wtAlias.replace('W0','U0'),u)
mc.setAttr(meshCon+'.'+wtAlias.replace('W0','V0'),v)
# Orient
if not orient:
rxConn = mc.listConnections(transform+'.rx',s=True,d=False,p=True)[0]
mc.disconnectAttr(rxConn,transform+'.rx')
ryConn = mc.listConnections(transform+'.ry',s=True,d=False,p=True)[0]
mc.disconnectAttr(ryConn,transform+'.ry')
rzConn = mc.listConnections(transform+'.rz',s=True,d=False,p=True)[0]
mc.disconnectAttr(rzConn,transform+'.rz')
mc.setAttr(transform+'.r',*r)
# =================
# - Return Result -
# =================
return meshCon
def meshFaceConstraintList(faceList=[],transformList=[],orient=True,prefix=''):
	'''
	Create meshFaceConstraint constraints for matching face/transform lists.
	'''
# ==========
# - Checks -
# ==========
# Face List
if not faceList:
faceList = mc.filterExpand(sm=34)
if not faceList: raise Exception('No mesh face list specified for constraint!')
# Transform List
if not transformList:
		transformList = ['' for _ in faceList]
	# Face / Transform list length
if not len(faceList) == len(transformList):
raise Exception('Face and Transform list length mis-match!')
# ======================
# - Create Constraints -
# ======================
constraintList = []
for i in range(len(faceList)):
mc.select(cl=True)
itPrefix = prefix+'_'+str(i)
constraintList.append(meshFaceConstraint(faceList[i],transformList[i],orient=orient,prefix=itPrefix))
# =================
# - Return Result -
# =================
return constraintList
def meshVertexConstraintList(vtxList=[],transformList=[],orient=True,prefix=''):
	'''
	Create meshVertexConstraint constraints for matching vertex/transform lists.
	'''
# ==========
# - Checks -
# ==========
# Vertex List
if not vtxList:
vtxList = mc.filterExpand(sm=31)
if not vtxList: raise Exception('No mesh vertex list specified for constraint!')
# Transform List
if not transformList:
transformList = ['' for vtx in vtxList]
# Vertex / Transform list length
if not len(vtxList) == len(transformList):
raise Exception('Vertex and Transform list length mis-match!')
# ======================
# - Create Constraints -
# ======================
constraintList = []
for i in range(len(vtxList)):
mc.select(cl=True)
itPrefix = prefix+'_'+str(i)
constraintList.append(meshVertexConstraint(vtxList[i],transformList[i],orient=orient,prefix=itPrefix))
# =================
# - Return Result -
# =================
return constraintList
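# Example usage inside Maya (object and mesh names are hypothetical):
#   mc.select(['pin_loc', 'body_mesh'])  # object to rivet first, then target mesh
#   rvt = autoRivet(createRivetTransform=True)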
# File: /examples/noisychannel/rerank.py (repo: fe1ixxu/fairseq-for-en-ar, MIT license)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from multiprocessing import Pool
import numpy as np
from fairseq import options
from fairseq.data import dictionary
from fairseq.scoring import bleu
from . import (
rerank_generate,
rerank_options,
rerank_score_bw,
rerank_score_lm,
rerank_utils,
)
def score_target_hypo(
args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize
):
print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c)
gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args)
dict = dictionary.Dictionary()
    scorer = bleu.Scorer(
bleu.BleuConfig(
pad=dict.pad(),
eos=dict.eos(),
unk=dict.unk(),
)
)
ordered_hypos = {}
ordered_targets = {}
for shard_id in range(len(bitext1_lst)):
bitext1 = bitext1_lst[shard_id]
bitext2 = bitext2_lst[shard_id]
gen_output = gen_output_lst[shard_id]
lm_res = lm_res_lst[shard_id]
total = len(bitext1.rescore_source.keys())
source_lst = []
hypo_lst = []
score_lst = []
reference_lst = []
j = 1
best_score = -math.inf
for i in range(total):
# length is measured in terms of words, not bpe tokens, since models may not share the same bpe
target_len = len(bitext1.rescore_hypo[i].split())
if lm_res is not None:
lm_score = lm_res.score[i]
else:
lm_score = 0
if bitext2 is not None:
bitext2_score = bitext2.rescore_score[i]
bitext2_backwards = bitext2.backwards
else:
bitext2_score = None
bitext2_backwards = None
score = rerank_utils.get_score(
a,
b,
c,
target_len,
bitext1.rescore_score[i],
bitext2_score,
lm_score=lm_score,
lenpen=lenpen,
src_len=bitext1.source_lengths[i],
tgt_len=bitext1.target_lengths[i],
bitext1_backwards=bitext1.backwards,
bitext2_backwards=bitext2_backwards,
normalize=normalize,
)
if score > best_score:
best_score = score
best_hypo = bitext1.rescore_hypo[i]
if j == gen_output.num_hypos[i] or j == args.num_rescore:
j = 1
hypo_lst.append(best_hypo)
score_lst.append(best_score)
source_lst.append(bitext1.rescore_source[i])
reference_lst.append(bitext1.rescore_target[i])
best_score = -math.inf
best_hypo = ""
else:
j += 1
gen_keys = list(sorted(gen_output.no_bpe_target.keys()))
for key in range(len(gen_keys)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], (
"pred and rescore hypo mismatch: i: "
+ str(key)
+ ", "
+ str(hypo_lst[key])
+ str(gen_keys[key])
+ str(gen_output.no_bpe_hypo[key])
)
sys_tok = dict.encode_line(hypo_lst[key])
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
else:
full_hypo = rerank_utils.get_full_from_prefix(
hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]]
)
sys_tok = dict.encode_line(full_hypo)
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
# if only one set of hyper parameters is provided, write the predictions to a file
if write_hypos:
        # recover the original ids from n best list generation
for key in range(len(gen_output.no_bpe_target)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], (
"pred and rescore hypo mismatch:"
+ "i:"
+ str(key)
+ str(hypo_lst[key])
+ str(gen_output.no_bpe_hypo[key])
)
ordered_hypos[gen_keys[key]] = hypo_lst[key]
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[
gen_keys[key]
]
else:
full_hypo = rerank_utils.get_full_from_prefix(
hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]]
)
ordered_hypos[gen_keys[key]] = full_hypo
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[
gen_keys[key]
]
# write the hypos in the original order from nbest list generation
if args.num_shards == (len(bitext1_lst)):
with open(target_outfile, "w") as t:
with open(hypo_outfile, "w") as h:
for key in range(len(ordered_hypos)):
t.write(ordered_targets[key])
h.write(ordered_hypos[key])
res = scorer.result_string(4)
if write_hypos:
print(res)
score = rerank_utils.parse_bleu_scoring(res)
return score
def match_target_hypo(args, target_outfile, hypo_outfile):
"""combine scores from the LM and bitext models, and write the top scoring hypothesis to a file"""
if len(args.weight1) == 1:
res = score_target_hypo(
args,
args.weight1[0],
args.weight2[0],
args.weight3[0],
args.lenpen[0],
target_outfile,
hypo_outfile,
True,
args.normalize,
)
rerank_scores = [res]
else:
print("launching pool")
with Pool(32) as p:
rerank_scores = p.starmap(
score_target_hypo,
[
(
args,
args.weight1[i],
args.weight2[i],
args.weight3[i],
args.lenpen[i],
target_outfile,
hypo_outfile,
False,
args.normalize,
)
for i in range(len(args.weight1))
],
)
if len(rerank_scores) > 1:
best_index = np.argmax(rerank_scores)
best_score = rerank_scores[best_index]
print("best score", best_score)
print("best lenpen", args.lenpen[best_index])
print("best weight1", args.weight1[best_index])
print("best weight2", args.weight2[best_index])
print("best weight3", args.weight3[best_index])
return (
args.lenpen[best_index],
args.weight1[best_index],
args.weight2[best_index],
args.weight3[best_index],
best_score,
)
else:
return (
args.lenpen[0],
args.weight1[0],
args.weight2[0],
args.weight3[0],
rerank_scores[0],
)
def load_score_files(args):
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
gen_output_lst = []
bitext1_lst = []
bitext2_lst = []
lm_res1_lst = []
for shard_id in shard_ids:
using_nbest = args.nbest_list is not None
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
rerank1_is_gen = (
args.gen_model == args.score_model1 and args.source_prefix_frac is None
)
rerank2_is_gen = (
args.gen_model == args.score_model2 and args.source_prefix_frac is None
)
score1_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1,
)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2,
)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(
pre_gen, args.prefix_len, args.lm_name, lm_file=True
)
# get gen output
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file,
bpe_symbol=args.remove_bpe,
nbest=using_nbest,
prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac,
)
if rerank1_is_gen:
bitext1 = gen_output
else:
bitext1 = rerank_utils.BitextOutput(
score1_file,
args.backwards1,
args.right_to_left1,
args.remove_bpe,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
if args.score_model2 is not None or args.nbest_list is not None:
if rerank2_is_gen:
bitext2 = gen_output
else:
bitext2 = rerank_utils.BitextOutput(
score2_file,
args.backwards2,
args.right_to_left2,
args.remove_bpe,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
assert (
bitext2.source_lengths == bitext1.source_lengths
), "source lengths for rescoring models do not match"
assert (
bitext2.target_lengths == bitext1.target_lengths
), "target lengths for rescoring models do not match"
else:
if args.diff_bpe:
assert args.score_model2 is None
bitext2 = gen_output
else:
bitext2 = None
if args.language_model is not None:
lm_res1 = rerank_utils.LMOutput(
lm_score_file,
args.lm_dict,
args.prefix_len,
args.remove_bpe,
args.target_prefix_frac,
)
else:
lm_res1 = None
gen_output_lst.append(gen_output)
bitext1_lst.append(bitext1)
bitext2_lst.append(bitext2)
lm_res1_lst.append(lm_res1)
return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst
def rerank(args):
if type(args.lenpen) is not list:
args.lenpen = [args.lenpen]
if type(args.weight1) is not list:
args.weight1 = [args.weight1]
if type(args.weight2) is not list:
args.weight2 = [args.weight2]
if type(args.weight3) is not list:
args.weight3 = [args.weight3]
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
for shard_id in shard_ids:
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
rerank_generate.gen_and_reprocess_nbest(args)
rerank_score_bw.score_bw(args)
rerank_score_lm.score_lm(args)
if args.write_hypos is None:
write_targets = pre_gen + "/matched_targets"
write_hypos = pre_gen + "/matched_hypos"
else:
write_targets = args.write_hypos + "_targets" + args.gen_subset
write_hypos = args.write_hypos + "_hypos" + args.gen_subset
if args.all_shards:
write_targets += "_all_shards"
write_hypos += "_all_shards"
(
best_lenpen,
best_weight1,
best_weight2,
best_weight3,
best_score,
) = match_target_hypo(args, write_targets, write_hypos)
return best_lenpen, best_weight1, best_weight2, best_weight3, best_score
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
rerank(args)
if __name__ == "__main__":
cli_main()
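# A minimal sketch of the weighted noisy-channel combination that
# score_target_hypo delegates to rerank_utils.get_score above. Only the
# argument names mirror the call site; this simplified body is an assumption
# (the real helper also handles backwards models, prefix fractions and
# source lengths):
def _combined_score_sketch(a, b, c, bitext1_score, bitext2_score, lm_score,
                           tgt_len, lenpen):
    score = a * bitext1_score + c * lm_score
    if bitext2_score is not None:
        score += b * bitext2_score
    # A length penalty of tgt_len ** lenpen damps long hypotheses.
    return score / (tgt_len ** lenpen)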
# File: /day1/lingxing.py (repo: rhflocef521/myproject, no license)
if __name__ == '__main__':
    h = int(input("Please enter the number of rows: "))
    n9 = range(1, 2 * h)
    # Upper half of the diamond: row i prints h-i spaces, then 2*i-1 stars.
    for i in n9[0:h]:
        for j in n9[0:h - i]:
            print(" ", end="")
        for j in n9[0:2 * i - 1]:
            print("*", end="")
        print()
    # Lower half of the diamond: the star count shrinks back down.
    for i in n9[0:h - 1]:
        for j in n9[0:i]:
            print(" ", end="")
        for j in n9[2 * (h - i) - 1:0:-1]:
            print("*", end="")
        print()
    # A second, spaced triangle built more directly with string repetition.
    for i in range(h + 1):
        print(" " * (h - i), end="")
        print(" *" * i, sep=" ")
# File: /devel/.private/pal_interaction_msgs/lib/python2.7/dist-packages/pal_interaction_msgs/msg/_AudioPlayActionFeedback.py
# (repo: MohamedEhabHafez/Sorting_Aruco_Markers, no license)
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_interaction_msgs/AudioPlayActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import pal_interaction_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class AudioPlayActionFeedback(genpy.Message):
_md5sum = "aae20e09065c3809e8a8e87c4c8953fd"
_type = "pal_interaction_msgs/AudioPlayActionFeedback"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
AudioPlayFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: pal_interaction_msgs/AudioPlayFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
"""
__slots__ = ['header','status','feedback']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','pal_interaction_msgs/AudioPlayFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AudioPlayActionFeedback, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_interaction_msgs.msg.AudioPlayFeedback()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.feedback = pal_interaction_msgs.msg.AudioPlayFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_interaction_msgs.msg.AudioPlayFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_interaction_msgs.msg.AudioPlayFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
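# Hypothetical round-trip of the autogenerated message above (field values are
# illustrative; in practice genpy/rospy drive serialization):
#   from io import BytesIO
#   msg = AudioPlayActionFeedback()
#   msg.status.text = 'playing'
#   buf = BytesIO()
#   msg.serialize(buf)
#   decoded = AudioPlayActionFeedback().deserialize(buf.getvalue())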
# File: /hgntransformers/modeling_tf_auto.py (repo: enterpriseih/sentgraph, no license)
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import logging
from .configuration_auto import (
BertConfig,
CTRLConfig,
DistilBertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
TransfoXLConfig,
XLMConfig,
XLNetConfig,
)
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TFAlbertModel,
)
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFBertForMaskedLM,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertModel,
)
from .modeling_tf_ctrl import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, TFCTRLLMHeadModel, TFCTRLModel
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
from .modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, TFGPT2LMHeadModel, TFGPT2Model
from .modeling_tf_openai import TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
from .modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP, TFT5Model, TFT5WithLMHeadModel
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMModel,
TFXLMWithLMHeadModel,
)
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetModel,
)
logger = logging.getLogger(__name__)
TF_ALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict(
(key, value)
for pretrained_map in [
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP,
]
for key, value, in pretrained_map.items()
)
class TFAutoModel(object):
r"""
:class:`~hgntransformers.TFAutoModel` is a generic model class
that will be instantiated as one of the base model classes of the library
when created with the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5Model (T5 model)
- contains `distilbert`: TFDistilBertModel (DistilBERT model)
- contains `roberta`: TFRobertaModel (RoBERTa model)
- contains `bert`: TFBertModel (Bert model)
- contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model)
- contains `xlnet`: TFXLNetModel (XLNet model)
- contains `xlm`: TFXLMModel (XLM model)
- contains `ctrl`: TFCTRLModel (CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModel is designed to be instantiated "
"using the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModel.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: TFDistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: TFRobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: TFBertModel (Bert model)
- isInstance of `openai-gpt` configuration class: TFOpenAIGPTModel (OpenAI GPT model)
- isInstance of `gpt2` configuration class: TFGPT2Model (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: TFCTRLModel (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: TFTransfoXLModel (Transformer-XL model)
- isInstance of `xlnet` configuration class: TFXLNetModel (XLNet model)
- isInstance of `xlm` configuration class: TFXLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
            model = TFAutoModel.from_config(config)  # Model is randomly initialized from the configuration; no weights are loaded.
"""
if isinstance(config, DistilBertConfig):
return TFDistilBertModel(config)
elif isinstance(config, RobertaConfig):
return TFRobertaModel(config)
elif isinstance(config, BertConfig):
return TFBertModel(config)
elif isinstance(config, OpenAIGPTConfig):
return TFOpenAIGPTModel(config)
elif isinstance(config, GPT2Config):
return TFGPT2Model(config)
elif isinstance(config, TransfoXLConfig):
return TFTransfoXLModel(config)
elif isinstance(config, XLNetConfig):
return TFXLNetModel(config)
elif isinstance(config, XLMConfig):
return TFXLMModel(config)
elif isinstance(config, CTRLConfig):
return TFCTRLModel(config)
raise ValueError("Unrecognized configuration class {}".format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5Model (T5 model)
- contains `distilbert`: TFDistilBertModel (DistilBERT model)
- contains `roberta`: TFRobertaModel (RoBERTa model)
            - contains `bert`: TFBertModel (Bert model)
- contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model)
- contains `xlnet`: TFXLNetModel (XLNet model)
- contains `ctrl`: TFCTRLModel (CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and :func:`~hgntransformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it is loaded) and initialize the model. (e.g. ``output_attention=True``). These arguments behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~hgntransformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
if "t5" in pretrained_model_name_or_path:
return TFT5Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "distilbert" in pretrained_model_name_or_path:
return TFDistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "albert" in pretrained_model_name_or_path:
return TFAlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "roberta" in pretrained_model_name_or_path:
return TFRobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "bert" in pretrained_model_name_or_path:
return TFBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "openai-gpt" in pretrained_model_name_or_path:
return TFOpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "gpt2" in pretrained_model_name_or_path:
return TFGPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "transfo-xl" in pretrained_model_name_or_path:
return TFTransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlnet" in pretrained_model_name_or_path:
return TFXLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlm" in pretrained_model_name_or_path:
return TFXLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "ctrl" in pretrained_model_name_or_path:
return TFCTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError(
            "Unrecognized model identifier in {}. Should contain one of "
            "'t5', 'distilbert', 'albert', 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', "
            "'xlnet', 'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)
        )
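# Hedged usage sketch (not part of the original file): because from_pretrained
# matches substrings in the order shown above, a name like
# "distilbert-base-uncased" is routed to TFDistilBertModel before the broader
# "bert" pattern can match it. The checkpoint names below are illustrative.
#
#     model = TFAutoModel.from_pretrained('distilbert-base-uncased')  # -> TFDistilBertModel
#     model = TFAutoModel.from_pretrained('bert-base-cased')          # -> TFBertModel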
class TFAutoModelWithLMHead(object):
r"""
:class:`~hgntransformers.TFAutoModelWithLMHead` is a generic model class
that will be instantiated as one of the language modeling model classes of the library
when created with the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
        - contains `t5`: TFT5WithLMHeadModel (T5 model)
        - contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model)
        - contains `albert`: TFAlbertForMaskedLM (ALBERT model)
        - contains `roberta`: TFRobertaForMaskedLM (RoBERTa model)
        - contains `bert`: TFBertForMaskedLM (Bert model)
        - contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model)
        - contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model)
        - contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model)
        - contains `xlnet`: TFXLNetLMHeadModel (XLNet model)
        - contains `xlm`: TFXLMWithLMHeadModel (XLM model)
        - contains `ctrl`: TFCTRLLMHeadModel (CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelWithLMHead is designed to be instantiated "
"using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelWithLMHead.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: RobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `openai-gpt` configuration class: OpenAIGPTModel (OpenAI GPT model)
- isInstance of `gpt2` configuration class: GPT2Model (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: CTRLModel (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: TransfoXLModel (Transformer-XL model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelWithLMHead.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
if isinstance(config, DistilBertConfig):
return TFDistilBertForMaskedLM(config)
elif isinstance(config, RobertaConfig):
return TFRobertaForMaskedLM(config)
elif isinstance(config, BertConfig):
return TFBertForMaskedLM(config)
elif isinstance(config, OpenAIGPTConfig):
return TFOpenAIGPTLMHeadModel(config)
elif isinstance(config, GPT2Config):
return TFGPT2LMHeadModel(config)
elif isinstance(config, TransfoXLConfig):
return TFTransfoXLLMHeadModel(config)
elif isinstance(config, XLNetConfig):
return TFXLNetLMHeadModel(config)
elif isinstance(config, XLMConfig):
return TFXLMWithLMHeadModel(config)
elif isinstance(config, CTRLConfig):
return TFCTRLLMHeadModel(config)
raise ValueError("Unrecognized configuration class {}".format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
            - contains `t5`: TFT5WithLMHeadModel (T5 model)
            - contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model)
            - contains `albert`: TFAlbertForMaskedLM (ALBERT model)
            - contains `roberta`: TFRobertaForMaskedLM (RoBERTa model)
            - contains `bert`: TFBertForMaskedLM (Bert model)
            - contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model)
            - contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model)
            - contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model)
            - contains `xlnet`: TFXLNetLMHeadModel (XLNet model)
            - contains `xlm`: TFXLMWithLMHeadModel (XLM model)
            - contains `ctrl`: TFCTRLLMHeadModel (CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
            config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and :func:`~hgntransformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it is loaded) and initialize the model. (e.g. ``output_attention=True``). These arguments behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~hgntransformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
if "t5" in pretrained_model_name_or_path:
return TFT5WithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "distilbert" in pretrained_model_name_or_path:
return TFDistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "albert" in pretrained_model_name_or_path:
return TFAlbertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "roberta" in pretrained_model_name_or_path:
return TFRobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "bert" in pretrained_model_name_or_path:
return TFBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "openai-gpt" in pretrained_model_name_or_path:
return TFOpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "gpt2" in pretrained_model_name_or_path:
return TFGPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "transfo-xl" in pretrained_model_name_or_path:
return TFTransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlnet" in pretrained_model_name_or_path:
return TFXLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlm" in pretrained_model_name_or_path:
return TFXLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "ctrl" in pretrained_model_name_or_path:
return TFCTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError(
            "Unrecognized model identifier in {}. Should contain one of "
            "'t5', 'distilbert', 'albert', 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', "
            "'xlnet', 'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)
        )
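# Hedged sketch (not part of the original file): the same substring-ordering
# rule applies to the LM-head dispatcher, e.g. a "roberta" checkpoint is
# matched before the "bert" branch is ever reached. The name is illustrative.
#
#     lm_model = TFAutoModelWithLMHead.from_pretrained('roberta-base')  # -> TFRobertaForMaskedLM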
class TFAutoModelForSequenceClassification(object):
r"""
:class:`~hgntransformers.TFAutoModelForSequenceClassification` is a generic model class
that will be instantiated as one of the sequence classification model classes of the library
when created with the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
        - contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model)
        - contains `albert`: TFAlbertForSequenceClassification (ALBERT model)
        - contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model)
        - contains `bert`: TFBertForSequenceClassification (Bert model)
        - contains `xlnet`: TFXLNetForSequenceClassification (XLNet model)
        - contains `xlm`: TFXLMForSequenceClassification (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelForSequenceClassification is designed to be instantiated "
"using the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelForSequenceClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: RobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
if isinstance(config, DistilBertConfig):
return TFDistilBertForSequenceClassification(config)
elif isinstance(config, RobertaConfig):
return TFRobertaForSequenceClassification(config)
elif isinstance(config, BertConfig):
return TFBertForSequenceClassification(config)
elif isinstance(config, XLNetConfig):
return TFXLNetForSequenceClassification(config)
elif isinstance(config, XLMConfig):
return TFXLMForSequenceClassification(config)
raise ValueError("Unrecognized configuration class {}".format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model)
            - contains `albert`: TFAlbertForSequenceClassification (ALBERT model)
            - contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model)
            - contains `bert`: TFBertForSequenceClassification (Bert model)
            - contains `xlnet`: TFXLNetForSequenceClassification (XLNet model)
            - contains `xlm`: TFXLMForSequenceClassification (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
            config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and :func:`~hgntransformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it is loaded) and initialize the model. (e.g. ``output_attention=True``). These arguments behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~hgntransformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
if "distilbert" in pretrained_model_name_or_path:
return TFDistilBertForSequenceClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "albert" in pretrained_model_name_or_path:
return TFAlbertForSequenceClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "roberta" in pretrained_model_name_or_path:
return TFRobertaForSequenceClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "bert" in pretrained_model_name_or_path:
return TFBertForSequenceClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "xlnet" in pretrained_model_name_or_path:
return TFXLNetForSequenceClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "xlm" in pretrained_model_name_or_path:
return TFXLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError(
            "Unrecognized model identifier in {}. Should contain one of "
            "'distilbert', 'albert', 'bert', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path)
        )
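# Hedged sketch (not part of the original file): when no `config` is passed,
# extra kwargs such as `num_labels` are first applied to the automatically
# loaded configuration, which is how the classification head gets its size.
#
#     clf = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)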
class TFAutoModelForQuestionAnswering(object):
r"""
:class:`~hgntransformers.TFAutoModelForQuestionAnswering` is a generic model class
that will be instantiated as one of the question answering model classes of the library
when created with the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model)
- contains `bert`: TFBertForQuestionAnswering (Bert model)
- contains `xlnet`: TFXLNetForQuestionAnswering (XLNet model)
- contains `xlm`: TFXLMForQuestionAnswering (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelForQuestionAnswering is designed to be instantiated "
"using the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelForQuestionAnswering.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
if isinstance(config, DistilBertConfig):
return TFDistilBertForQuestionAnswering(config)
elif isinstance(config, BertConfig):
return TFBertForQuestionAnswering(config)
elif isinstance(config, XLNetConfig):
raise NotImplementedError("TFXLNetForQuestionAnswering isn't implemented")
elif isinstance(config, XLMConfig):
raise NotImplementedError("TFXLMForQuestionAnswering isn't implemented")
raise ValueError("Unrecognized configuration class {}".format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model)
- contains `bert`: TFBertForQuestionAnswering (Bert model)
- contains `xlnet`: TFXLNetForQuestionAnswering (XLNet model)
- contains `xlm`: TFXLMForQuestionAnswering (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
            config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and :func:`~hgntransformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it is loaded) and initialize the model. (e.g. ``output_attention=True``). These arguments behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~hgntransformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
if "distilbert" in pretrained_model_name_or_path:
return TFDistilBertForQuestionAnswering.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "bert" in pretrained_model_name_or_path:
return TFBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlnet" in pretrained_model_name_or_path:
return TFXLNetForQuestionAnsweringSimple.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "xlm" in pretrained_model_name_or_path:
return TFXLMForQuestionAnsweringSimple.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
        raise ValueError(
            "Unrecognized model identifier in {}. Should contain one of "
            "'distilbert', 'bert', 'xlnet', 'xlm'".format(pretrained_model_name_or_path)
        )
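# Note added for clarity (not in the original file): for "xlnet" and "xlm" this
# from_pretrained dispatcher returns the *Simple* QA heads, whereas the
# from_config path above raises NotImplementedError for those configurations.
#
#     qa = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')  # -> TFBertForQuestionAnswering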
class TFAutoModelForTokenClassification:
def __init__(self):
raise EnvironmentError(
"TFAutoModelForTokenClassification is designed to be instantiated "
"using the `TFAutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTokenClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBert model)
            - isInstance of `roberta` configuration class: RobertaModel (RoBERTa model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = TFAutoModelForTokenClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
if isinstance(config, BertConfig):
return TFBertForTokenClassification(config)
elif isinstance(config, XLNetConfig):
return TFXLNetForTokenClassification(config)
elif isinstance(config, DistilBertConfig):
return TFDistilBertForTokenClassification(config)
elif isinstance(config, RobertaConfig):
return TFRobertaForTokenClassification(config)
raise ValueError("Unrecognized configuration class {}".format(config))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `bert`: BertForTokenClassification (Bert model)
- contains `xlnet`: XLNetForTokenClassification (XLNet model)
- contains `distilbert`: DistilBertForTokenClassification (DistilBert model)
- contains `roberta`: RobertaForTokenClassification (Roberta model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
            config: (`optional`) instance of a class derived from :class:`~hgntransformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using :func:`~hgntransformers.PreTrainedModel.save_pretrained` and :func:`~hgntransformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it is loaded) and initialize the model. (e.g. ``output_attention=True``). These arguments behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~hgntransformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForTokenClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForTokenClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
if "bert" in pretrained_model_name_or_path:
return TFBertForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "xlnet" in pretrained_model_name_or_path:
return TFXLNetForTokenClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif "distilbert" in pretrained_model_name_or_path:
return TFDistilBertForTokenClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
elif "roberta" in pretrained_model_name_or_path:
return TFRobertaForTokenClassification.from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
        raise ValueError(
            "Unrecognized model identifier in {}. Should contain one of "
            "'bert', 'xlnet', 'distilbert', 'roberta'".format(pretrained_model_name_or_path)
        )
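# Note added for clarity (not in the original file): unlike the other Auto
# classes above, this dispatcher tests "bert" before "distilbert", so a name
# such as "distilbert-base-cased" also contains "bert" and is dispatched to
# TFBertForTokenClassification rather than TFDistilBertForTokenClassification.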
| [
"[email protected]"
] | |
2519156c6da4180c2448a0637cbcdaea6a256529 | fd2931f5e1721f469d5113f7b70ec721f3288503 | /my_house_settings.py | 6ac57f9b524ecbffef04039eafd214a1ad8aa474 | [
"MIT"
] | permissive | RasPiPkr/myHome | fbfb79b291e2e41ca0500d43117ae4942ca5cb6b | 213a09946a20c27743fa4c8fc90acb128911ab0c | refs/heads/main | 2023-02-05T21:54:21.907145 | 2020-12-21T19:36:56 | 2020-12-21T19:36:56 | 313,453,355 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,672 | py | # Settings for my Heating & Security Control
# GPIO pins used on Raspberry Pi
relayPins = [18, 23, 24, 22, 6, 13, 19, 26]
pirPins = [21, 20, 16, 12]
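# Hedged sketch (not part of this settings file): one plausible way the pin
# lists above are consumed with the RPi.GPIO library; the direction of each
# pin is an assumption about how myHome wires things up, not taken from this repo.
#
#     import RPi.GPIO as GPIO
#     GPIO.setmode(GPIO.BCM)           # the pin numbers above are BCM numbers
#     for pin in relayPins:
#         GPIO.setup(pin, GPIO.OUT)    # relays are driven as outputs
#     for pin in pirPins:
#         GPIO.setup(pin, GPIO.IN)     # PIR motion sensors are read as inputs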
# CCTV variables
user = '' # In the string put your user name for your cctv system
passwd = '' # In the string put your password for your cctv system
camIP = '' # In the string put your local IP address for your cctv system
httpPort = '' # In the string enter the HTTP port configured in your cctv system
# Your system might not need these:
cam1 = '101' # Depending on your cctv system: how many cameras you have and how the web request to view a given camera must be formed
cam2 = '201' # Replace these values, I have left them in as these worked with my cctv system
cam3 = '301' # For my system the first digit refers to the camera and the 01 being the main stream 02 being substream, main stream is for best quality
# Google voice announcements
dialog = [] # Put links to a host for a recorded mp3, I used dropbox for mine
# Testing google home mini
voice_google = '' # In the string enter the local IP address of your Google Home to play sound from
# Default heating timer settings; these are the defaults used when the GUI starts
morningTimer = '07:00:00' # Set them here, or change them while the GUI is running
morningTimerTemp = '20.0'
bedtimeTimer = '23:00:00'
bedtimeTimerTemp = '19.0'
defaultTemp = '20.0'
# External lights on time
sunset = '16:00:00'
sunrise = '07:00:00'
dark = True
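# Hedged sketch (not from this repo): because the times above are zero-padded
# 'HH:MM:SS' strings, plain string comparison orders them correctly, so the
# `dark` flag could be refreshed like this:
#
#     from datetime import datetime
#     now = datetime.now().strftime('%H:%M:%S')
#     dark = now >= sunset or now < sunrise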
# ZONES
# ZONE DEFAULT OPTIONS: 0 = off, 1 = auto, 2 = on (this sets how each zone starts up in the GUI)
z1BtnsList = ['imgs/z1_off_btn.png', 'imgs/z1_auto_btn.png', 'imgs/z1_on_btn.png']
z1Default = 1
z1Timer = 120 # Seconds
z1Trig = 'imgs/z1_trig_btn.png'
z2BtnsList = ['imgs/z2_off_btn.png', 'imgs/z2_auto_btn.png', 'imgs/z2_on_btn.png']
z2Default = 1
z2Timer = 120 # Seconds
z2Trig = 'imgs/z2_trig_btn.png'
z3BtnsList = ['imgs/z3_off_btn.png', 'imgs/z3_auto_btn.png', 'imgs/z3_on_btn.png']
z3Default = 1
z3Timer = 120 # Seconds
z3Trig = 'imgs/z3_trig_btn.png'
z4BtnsList = ['imgs/z4_off_btn.png', 'imgs/z4_auto_btn.png', 'imgs/z4_on_btn.png']
z4Default = 1
z4Timer = 120 # Seconds
z4Trig = 'imgs/z4_trig_btn.png'
# OPTIONS
# DEFAULT OPTIONS: 0 = off, 1 = on
# Ext voice buttons list
extVoiceBtnsList = ['imgs/ext_voice_off_btn.png', 'imgs/ext_voice_on_btn.png']
extVoiceDefault = 0
# Doors voice buttons list
doorsBtnsList = ['imgs/doors_voice_off_btn.png', 'imgs/doors_voice_on_btn.png']
doorsDefault = 0
# CCTV buttons list
cctvBtnsList = ['imgs/cctv_off_btn.png', 'imgs/cctv_on_btn.png']
cctvDefault = 0
# Decking buttons list
deckingBtnsList = ['imgs/decking_off_btn.png', 'imgs/decking_on_btn.png']
deckingDefault = 0
| [
"[email protected]"
] | |
a519ec832241bd1385af8e97a2212ce45f79158a | 255b559089b5ee50b396505ae3caf443a6e1a82d | /rest_api.py | dad455f932fbcfd1de4bfa7e68f4ff62c2ae7b1b | [] | no_license | rdorame/ParcialLabWeb2 | 18176bc6f9c36bb344851585e362b3ab62c4b16c | 5f4f47f76523fee75962d8260a63bcd5337402c0 | refs/heads/master | 2021-01-23T01:02:18.575549 | 2017-04-01T12:48:24 | 2017-04-01T12:48:24 | 85,868,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,892 | py | import endpoints
from google.appengine.ext import ndb
from google.appengine.api import app_identity
from protorpc import remote
import jwt
import time
from CustomExceptions import NotFoundException
from messages import EmailPasswordMessage, TokenMessage, CodeMessage, Token, TokenKey, MessageNone
from messages import EmpresaInput, EmpresaUpdate, EmpresaList
from messages import TeamInput, TeamUpdate, TeamList
from messages import FacturaInput, FacturaUpdate, FacturaList
from messages import TicketInput, TicketUpdate, TicketList
from messages import UserInput, UserUpdate, UserList
from endpoints_proto_datastore.ndb import EndpointsModel
import models
from models import validarEmail
from models import Empresa, Usuarios, Team, Factura, Ticket
###############
# Usuarios
###############
@endpoints.api(name='usuarios_api', version='v1', description='usuarios endpoints')
class UsuariosApi(remote.Service):
###############get the info of one########
@endpoints.method(TokenKey, UserList, path='users/get', http_method='POST', name='users.get')
def users_get(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            userentity = ndb.Key(urlsafe=request.entityKey)
            user = Usuarios.get_by_id(userentity.id()) # fetch the user
            #user = Usuarios.get_by_id(token['user_id']) # fetch the user from the token
            lista = [] # build the result list
            lstMessage = UserList(code=1) # create the message object
lista.append(UserUpdate(token='',
entityKey= user.entityKey,
codigo_empresa = user.codigo_empresa,
#empresa_key = user.empresa_key.urlsafe(),
email = user.email))
            lstMessage.data = lista # assign the list to the output
            message = lstMessage
        except jwt.DecodeError:
            message = UserList(code=-1, data=[]) # invalid token
        except jwt.ExpiredSignatureError:
            message = UserList(code=-2, data=[]) # token expired
return message
    ########################## list ###################
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(Token, UserList, path='users/list', http_method='POST', name='users.list')
def lista_usuarios(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user from the token
            lista = [] # build the result list
            lstMessage = UserList(code=1) # create the message object
            lstBd = Usuarios.query().fetch() # fetch from the datastore
            for i in lstBd: # iterate over the results
lista.append(UserUpdate(token='',
entityKey=i.entityKey,
#empresa_key=user.empresa_key.urlsafe(),
email=i.email,
codigo_empresa = i.codigo_empresa
                )) # append to the list
            lstMessage.data = lista # hand the list to the message
            message = lstMessage # return value
        except jwt.DecodeError:
            message = UserList(code=-1, data=[]) # invalid token
        except jwt.ExpiredSignatureError:
            message = UserList(code=-2, data=[]) # token expired
return message
# delete
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(TokenKey, CodeMessage, path='users/delete', http_method='POST', name='users.delete')
    # always takes cls and request
def user_remove(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            usersentity = ndb.Key(urlsafe=request.entityKey) # get the entity from its entityKey
            usersentity.delete() # delete it
            message = CodeMessage(code=1, message='Successfully deleted')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
# insert
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(UserInput, CodeMessage, path='users/insert', http_method='POST', name='users.insert')
def user_add(cls, request):
try:
            token = jwt.decode(request.token, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id'])
            if validarEmail(request.email) == False: # check whether the email is already registered
                #empresakey = ndb.Key(urlsafe=request.empresa_key) # convert the given string to an entityKey
                if user.usuario_m(request, user.empresa_key)==0: # call the function declared in models.py in the USUARIOS section
                    codigo=1
                else:
                    codigo=-3
                # the usuario_m function can both update and insert,
                # depending on the INPUT of this endpoint method
                message = CodeMessage(code=codigo, message='Successfully added')
else:
                message = CodeMessage(code=-4, message='This email address is already registered')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
##login##
@endpoints.method(EmailPasswordMessage, TokenMessage, path='users/login', http_method='POST', name='users.login')
def users_login(cls, request):
try:
            user = Usuarios.query(Usuarios.email == request.email).fetch() # fetch the user by email
            if not user or len(user) == 0: # bail out if no user was found
                raise NotFoundException()
            user = user[0]
            keye = user.empresa_key.urlsafe() # the empresa key is returned as the message
            if not user.verify_password(request.password): # check the password
                raise NotFoundException()
            token = jwt.encode({'user_id': user.key.id(), 'exp': time.time() + 43200}, 'secret') # create the token (12-hour expiry)
            message = TokenMessage(token=token, message=keye, code=1) # return the token
except NotFoundException:
message = TokenMessage(token=None, message='Wrong username or password', code=-1)
return message
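    # Hedged client-side sketch (not part of this API): how a discovery-based
    # Google API client might call the login endpoint and reuse the returned
    # JWT; the service object and field names below are assumptions that mirror
    # the message definitions used in this file.
    #
    #     resp = service.users().login(body={'email': e, 'password': p}).execute()
    #     token = resp['token']  # pass later as the `tokenint`/`token` field
    #     service.users().list(body={'tokenint': token}).execute()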
    ##update##
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(UserUpdate, CodeMessage, path='user/update', http_method='POST', name='user.update')
    # always takes cls and request
def user_update(cls, request):
try:
            token = jwt.decode(request.token, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user so we can call the methods declared in models.py in the USUARIOS section
            empresakey = ndb.Key(urlsafe=user.empresa_key.urlsafe()) # convert the given string to an entityKey
            if user.usuario_m(request, empresakey)==0: # call the function declared in models.py in the USUARIOS section
                codigo=1
            else:
                codigo=-3
            # the usuario_m function can both update and insert,
            # depending on the INPUT of this endpoint method
            message = CodeMessage(code=1, message='Your changes have been saved successfully')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
###########################
#### Empresa
###########################
## Google Cloud Endpoint
@endpoints.api(name='empresas_api', version='v1', description='empresas REST API')
class EmpresasApi(remote.Service):
# get one
@endpoints.method(TokenKey, EmpresaList, path='empresa/get', http_method='POST', name='empresa.get')
    # always takes cls and request
def empresa_get(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            # get the entity from its entityKey
            empresaentity = ndb.Key(urlsafe=request.entityKey)
            # BUILD the output (an EmpresaList of EmpresaUpdate) and assign its values, as declared in messages.py
            # empresaentity.get().empresa_key.urlsafe() yields the entityKey
            ##### real example:
            ####### message = EmpresaList(code=1, data=[EmpresaUpdate(token='Succesfully get', nombre_empresa=empresaentity.get().nombre_empresa, empresa_key=empresaentity.get().empresa_key.urlsafe(), entityKey=empresaentity.get().entityKey)])
            message = EmpresaList(code=1, data = [EmpresaUpdate(token='Successfully get',
entityKey = empresaentity.get().entityKey,
codigo_empresa=empresaentity.get().codigo_empresa,
nombre_empresa = empresaentity.get().nombre_empresa,
lat_empresa = empresaentity.get().lat_empresa,
long_empresa = empresaentity.get().long_empresa,
logo_empresa = empresaentity.get().logo_empresa
)])
except jwt.DecodeError:
message = EmpresaList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = EmpresaList(code=-2, data=[])
return message
@endpoints.method(TokenKey, CodeMessage, path='empresa/delete', http_method='POST', name='empresa.delete')
    # always takes cls and request
def empresa_remove(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            empresaentity = ndb.Key(urlsafe=request.entityKey) # get the entity from its entityKey
            empresaentity.delete() # delete it
            message = CodeMessage(code=1, message='Successfully deleted')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
# insert
@endpoints.method(EmpresaInput, CodeMessage, path='empresa/insert', http_method='POST', name='empresa.insert')
    # always takes cls and request
def empresa_add(cls, request):
try:
            token = jwt.decode(request.token, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user (see models.py)
            myempresa = Empresa()
            if myempresa.empresa_m(request)==0:
                codigo=1
            else:
                codigo=-3
            # the empresa_m function can both update and insert,
            # depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Successfully added')
#else:
# message = CodeMessage(code=-4, message='Succesfully added')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
# update
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(EmpresaUpdate, CodeMessage, path='empresa/update', http_method='POST', name='empresa.update')
    # always takes cls and request
def empresa_update(cls, request):
try:
            token = jwt.decode(request.token, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user so we can call the methods declared in models.py in the USUARIOS section
            #empresakey = ndb.Key(urlsafe=request.empresa_key) # convert the given string to an entityKey
            myempresa = Empresa()
            if myempresa.empresa_m(request)==0: # call the function declared in models.py
                codigo=1
            else:
                codigo=-3
            # the empresa_m function can both update and insert,
            # depending on the INPUT of this endpoint method
            message = CodeMessage(code=1, message='Your changes have been saved successfully')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
# list
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(Token, EmpresaList, path='empresa/list', http_method='POST', name='empresa.list')
    # always takes cls and request
def empresa_list(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user from the token
            #if user.importante==1 or user.importante==2:
            lista = [] # list to collect the datastore contents
            lstMessage = EmpresaList(code=1) # CREATE the output message
            lstBdEmpresa = Empresa.query().fetch() # fetch from the datastore
            for i in lstBdEmpresa: # iterate over the datastore results
                # append to the list whichever datastore fields are needed
                # i.empresa_key.urlsafe() yields the entityKey
                #lista.append(ClientesUpdate(token='', nombre=i.nombre, status=i.status, empresa_key=i.empresa_key.urlsafe(), entityKey=i.entityKey))
lista.append(EmpresaUpdate(token='',
entityKey = i.entityKey,
codigo_empresa=i.codigo_empresa,
nombre_empresa = i.nombre_empresa,
lat_empresa = i.lat_empresa,
long_empresa = i.long_empresa,
logo_empresa = i.logo_empresa
))
            lstMessage.data = lista # assign the list to the output
message = lstMessage
#else:
# message = EmpresaList(code=-3, data=[])
except jwt.DecodeError:
message = EmpresaList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = EmpresaList(code=-2, data=[])
return message
###########################
#### Team
###########################
@endpoints.api(name='team_api', version='v1', description='team REST API')
class TeamApi(remote.Service):
# get one
    # INPUT, OUTPUT, PATH (always POST), NAME
@endpoints.method(TokenKey, TeamList, path='team/get', http_method='POST', name='team.get')
    # always takes cls and request
def team_get(cls, request):
try:
            token = jwt.decode(request.tokenint, 'secret') # check the token
            # get the entity from its entityKey
            teamentity = ndb.Key(urlsafe=request.entityKey)
            # BUILD the output (a TeamList of TeamUpdate) and assign its values, as declared in messages.py
            # teamentity.get().empresa_key.urlsafe() yields the entityKey
            message = TeamList(code=1, data=[TeamUpdate(token='Successfully get',
entityKey=teamentity.get().entityKey,
#empresa_key=teamentity.get().empresa_key.urlsafe(),
nombre=teamentity.get().nombre,
puesto=teamentity.get().puesto,
urlImage=teamentity.get().urlImage)])
except jwt.DecodeError:
message = TeamList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = TeamList(code=-2, data=[])
return message
    # delete
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TokenKey, CodeMessage, path='team/delete', http_method='POST', name='team.delete')
    # always takes cls and request
    def team_remove(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            teamentity = ndb.Key(urlsafe=request.entityKey)# fetch the element given the entityKey
            teamentity.delete()# delete it
            message = CodeMessage(code=0, message='Se ha eliminado el r.h.')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # list
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(Token, TeamList, path='team/list', http_method='POST', name='team.list')
    # always takes cls and request
    def team_list(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user given the token
            lista = [] # list that will hold the database contents
            lstMessage = TeamList(code=1) # build the output message
            lstBd = Team.query().fetch() # fetch from the database
            for i in lstBd: # iterate over the database rows
                # append the fields we need from each row;
                # i.empresa_key.urlsafe() would yield the entityKey
                lista.append(TeamUpdate(token='',
                    entityKey=i.entityKey,
                    #empresa_key=i.empresa_key.urlsafe(),
                    nombre=i.nombre,
                    puesto=i.puesto,
                    urlImage=i.urlImage
                    ))
            lstMessage.data = lista # assign the list to the output
message = lstMessage
except jwt.DecodeError:
message = TeamList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = TeamList(code=-2, data=[])
return message
    # insert
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TeamInput, CodeMessage, path='team/insert', http_method='POST', name='team.insert')
    # always takes cls and request
    def team_add(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user to access the methods declared in the USUARIOS section of models.py
            myteam = Team()
            if myteam.team_m(request, user.empresa_key)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #team_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Su r.h. se ha registrado exitosamente')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # update
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TeamUpdate, CodeMessage, path='team/update', http_method='POST', name='team.update')
    # always takes cls and request
    def team_update(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id'])# fetch the user to access the methods declared in the USUARIOS section of models.py
            empresakey = ndb.Key(urlsafe=user.empresa_key.urlsafe())# convert the given string to an entityKey
            myteam = Team()
            if myteam.team_m(request, empresakey)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #team_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Sus cambios han sido guardados exitosamente')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
###########################
#### Factura
###########################
@endpoints.api(name='factura_api', version='v1', description='factura REST API')
class FacturaApi(remote.Service):
    # get one
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TokenKey, FacturaList, path='factura/get', http_method='POST', name='factura.get')
    # always takes cls and request
    def factura_get(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            # fetch the element given the entityKey
            facturaentity = ndb.Key(urlsafe=request.entityKey)
            # BUILD THE OUTPUT and assign its values, exactly as declared in messages.py
            # facturaentity.get().empresa_key.urlsafe() would yield the EntityKey
            message = FacturaList(code=1, data=[FacturaUpdate(token='Successfully get',
                entityKey=facturaentity.get().entityKey,
                #empresa_key=teamentity.get().empresa_key.urlsafe(),
                tipoDePersona=facturaentity.get().tipoDePersona,
                nombre=facturaentity.get().nombre,
                idTicket = facturaentity.get().idTicket,
                rfc = facturaentity.get().rfc,
                pais = facturaentity.get().pais,
                estado = facturaentity.get().estado,
                municipio = facturaentity.get().municipio,
                colonia = facturaentity.get().colonia,
                cp = facturaentity.get().cp,
                calle = facturaentity.get().calle,
                numExt = facturaentity.get().numExt,
                numInt = facturaentity.get().numInt,
                email = facturaentity.get().email,
                numFolio = facturaentity.get().numFolio,
                fecha = facturaentity.get().fecha
                )])
except jwt.DecodeError:
message = FacturaList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = FacturaList(code=-2, data=[])
return message
    # delete
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TokenKey, CodeMessage, path='factura/delete', http_method='POST', name='factura.delete')
    # always takes cls and request
    def factura_remove(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            facturaentity = ndb.Key(urlsafe=request.entityKey)# fetch the element given the entityKey
            facturaentity.delete()# delete it
            message = CodeMessage(code=0, message='Se ha eliminado la factura')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # list
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(Token, FacturaList, path='factura/list', http_method='POST', name='factura.list')
    # always takes cls and request
    def factura_list(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user given the token
            lista = [] # list that will hold the database contents
            lstMessage = FacturaList(code=1) # build the output message
            lstBd = Factura.query().fetch() # fetch from the database
            for i in lstBd: # iterate over the database rows
                # append the fields we need from each row;
                # i.empresa_key.urlsafe() would yield the entityKey
                lista.append(FacturaUpdate(token='',
                    entityKey=i.entityKey,
                    #empresa_key=i.empresa_key.urlsafe(),
                    tipoDePersona=i.tipoDePersona,
                    nombre=i.nombre,
                    idTicket = i.idTicket,
                    rfc = i.rfc,
                    pais = i.pais,
                    estado = i.estado,
                    municipio = i.municipio,
                    colonia = i.colonia,
                    cp = i.cp,
                    calle = i.calle,
                    numExt = i.numExt,
                    numInt = i.numInt,
                    email = i.email,
                    numFolio = i.numFolio,
                    fecha = i.fecha
                    ))
            lstMessage.data = lista # assign the list to the output
message = lstMessage
except jwt.DecodeError:
message = FacturaList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = FacturaList(code=-2, data=[])
return message
    # insert
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(FacturaInput, CodeMessage, path='factura/insert', http_method='POST', name='factura.insert')
    # always takes cls and request
    def factura_add(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user to access the methods declared in the USUARIOS section of models.py
            myfactura = Factura()
            if myfactura.factura_m(request, user.empresa_key)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #factura_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Factura registrada con éxito')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # update
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(FacturaUpdate, CodeMessage, path='factura/update', http_method='POST', name='factura.update')
    # always takes cls and request
    def factura_update(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id'])# fetch the user to access the methods declared in the USUARIOS section of models.py
            empresakey = ndb.Key(urlsafe=user.empresa_key.urlsafe())# convert the given string to an entityKey
            myfactura = Factura()
            if myfactura.factura_m(request, empresakey)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #factura_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Sus cambios han sido guardados exitosamente')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
###########################
#### Tickets
###########################
@endpoints.api(name='ticket_api', version='v1', description='ticket REST API')
class TicketApi(remote.Service):
    # get one
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TokenKey, TicketList, path='ticket/get', http_method='POST', name='ticket.get')
    # always takes cls and request
    def ticket_get(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            # fetch the element given the entityKey
            ticketentity = ndb.Key(urlsafe=request.entityKey)
            # BUILD THE OUTPUT and assign its values, exactly as declared in messages.py
            # ticketentity.get().empresa_key.urlsafe() would yield the EntityKey
            message = TicketList(code=1, data=[TicketUpdate(token='Successfully get',
                entityKey=ticketentity.get().entityKey,
                #empresa_key=teamentity.get().empresa_key.urlsafe(),
                folio=ticketentity.get().folio,
                fecha=ticketentity.get().fecha,
                total=ticketentity.get().total,
                items=ticketentity.get().items,
                qty=ticketentity.get().qty,
                facturado=ticketentity.get().facturado
                )])
except jwt.DecodeError:
message = TicketList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = TicketList(code=-2, data=[])
return message
    # delete
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TokenKey, CodeMessage, path='ticket/delete', http_method='POST', name='ticket.delete')
    # always takes cls and request
    def ticket_remove(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            ticketentity = ndb.Key(urlsafe=request.entityKey)# fetch the element given the entityKey
            ticketentity.delete()# delete it
            message = CodeMessage(code=0, message='Se ha eliminado el ticket')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # list
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(Token, TicketList, path='ticket/list', http_method='POST', name='ticket.list')
    # always takes cls and request
    def ticket_list(cls, request):
        try:
            token = jwt.decode(request.tokenint, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user given the token
            lista = [] # list that will hold the database contents
            lstMessage = TicketList(code=1) # build the output message
            lstBd = Ticket.query().fetch() # fetch from the database
            for i in lstBd: # iterate over the database rows
                # append the fields we need from each row;
                # i.empresa_key.urlsafe() would yield the entityKey
                lista.append(TicketUpdate(token='',
                    entityKey=i.entityKey,
                    #empresa_key=i.empresa_key.urlsafe(),
                    folio=i.folio,
                    fecha=i.fecha,
                    total=i.total,
                    items=i.items,
                    qty=i.qty,
                    facturado=i.facturado
                    ))
            lstMessage.data = lista # assign the list to the output
message = lstMessage
except jwt.DecodeError:
message = TicketList(code=-1, data=[])
except jwt.ExpiredSignatureError:
message = TicketList(code=-2, data=[])
return message
    # insert
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TicketInput, CodeMessage, path='ticket/insert', http_method='POST', name='ticket.insert')
    # always takes cls and request
    def ticket_add(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id']) # fetch the user to access the methods declared in the USUARIOS section of models.py
            myticket = Ticket()
            if myticket.ticket_m(request, user.empresa_key)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #ticket_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Ticket registrado')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
    # update
    # INPUT, OUTPUT, PATH (always POST), NAME
    @endpoints.method(TicketUpdate, CodeMessage, path='ticket/update', http_method='POST', name='ticket.update')
    # always takes cls and request
    def ticket_update(cls, request):
        try:
            token = jwt.decode(request.token, 'secret')# validate the token
            user = Usuarios.get_by_id(token['user_id'])# fetch the user to access the methods declared in the USUARIOS section of models.py
            empresakey = ndb.Key(urlsafe=user.empresa_key.urlsafe())# convert the given string to an entityKey
            myticket = Ticket()
            if myticket.ticket_m(request, empresakey)==0:# call the function declared in the USUARIOS section of models.py
                codigo=1
            else:
                codigo=-3
            #ticket_m can both update and insert,
            #depending on the INPUT of this endpoint method
            message = CodeMessage(code=codigo, message='Sus cambios han sido guardados exitosamente')
except jwt.DecodeError:
message = CodeMessage(code=-2, message='Invalid token')
except jwt.ExpiredSignatureError:
message = CodeMessage(code=-1, message='Token expired')
return message
application = endpoints.api_server([UsuariosApi, EmpresasApi, TeamApi, FacturaApi, TicketApi], restricted=False)
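# Illustrative note (added; not part of the original app): with Cloud Endpoints v1,
# each service above is served under /_ah/api/<api name>/v1/<path>, so a
# hypothetical ticket listing would POST a JSON body such as
# {"tokenint": "<jwt>"} to /_ah/api/ticket_api/v1/ticket/list on the deployed host.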
| [
"[email protected]"
] | |
cce41f4aca21801834da31dc392b6b100c9a480b | 3fc5b3783f0cfe5f4a13959ccef2b5ad916ecaae | /CRN_Pascal_Pretrain/utils/params.py | f8e914e232a9481f89b62f22f3836b8d2c51ca09 | [] | no_license | feinanshan/Motion-Guided-CRN | 4ba0f4cc06ced6b6968f24aa5e71912560612fef | 69c705ee3e1008ca373854172234ac765af7cfe9 | refs/heads/master | 2021-04-12T11:08:16.236573 | 2020-03-24T02:58:38 | 2020-03-24T02:58:38 | 126,672,764 | 32 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | import ConfigParser
import numpy as np
from ast import literal_eval
class Params(object):
def __init__(self,config_file='./config.ini',for_test=False):
parser = ConfigParser.ConfigParser()
parser.read(config_file)
self.gpu_nums = 1
# model
self.network = parser.get("model", "network")
self.backbone = parser.get("model", "backbone")
self.prefix = parser.get("model","network")
self.pretrained = parser.getboolean("model","pretrained")
self.frame_num = parser.getint("model","frame_num")
self.img_size = parser.getint("model","img_size")
# epoch
self.begin_epoch = parser.getint("epoch", "begin_epoch")
self.end_epoch = parser.getint("epoch", "end_epoch")
self.frequence = parser.getint("epoch", "frequence")
# iterator
itr_set = "iterator_test" if for_test else 'iterator_train'
self.batch_size = parser.getint(itr_set, "batch_size")
self.num_thread = parser.getint(itr_set, "num_thread")
self.gt_path = parser.get(itr_set, "gt_path")
self.img_path = parser.get(itr_set, "img_path")
self.list_path = parser.get(itr_set, "list_path")
self.data_aug = parser.getboolean(itr_set, "data_aug")
self.use_global_stats = parser.getboolean(itr_set, "use_global_stats")
self.updateIter = parser.getint(itr_set, "updateIter")
# optimizer
self.optimizer = parser.get("optimizer", "name")
self.learning_rate = parser.getfloat("optimizer", "learning_rate")
self.wd = parser.getfloat("optimizer", "wd")
self.momentum = parser.getfloat("optimizer", "momentum")
# misc
self.description = parser.get("misc", "description")
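        # Usage sketch (added for illustration; the option values below are made up):
        # the config.ini read above needs [model], [epoch], [iterator_train] /
        # [iterator_test], [optimizer] and [misc] sections whose option names
        # mirror the parser.get* calls, e.g.
        #
        #   [model]
        #   network = crn
        #   backbone = resnet101
        #   pretrained = True
        #   frame_num = 2
        #   img_size = 321
        #
        # params = Params(config_file='./config.ini', for_test=False)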
| [
"[email protected]"
] | |
a665f4e9d1535345865d92f2914bbc0053c2cfc2 | fda6735097ae1d0ef548b5789eb22398ce550d5e | /Learn_TensorFlow_2.py | f595a1b68655adc5a4da3c720109fe8d26270516 | [] | no_license | ll2088/zyy_ML-DL | fa618d7eba4ca4753f450b0d70bc2178b9c6e5fc | 5f96bb89433d62657806b7c8182cb1f80b02f772 | refs/heads/master | 2020-04-26T18:20:38.385870 | 2017-07-14T10:03:13 | 2017-07-14T10:03:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # author zyyFTD
# Github: https://github.com/YuyangZhangFTD/zyy_ML-DL
"""
This code is for Python 3.
"""
import tensorflow as tf # import tensorflow
c = tf.constant(1.5) # create a constant
x = tf.Variable(1.0, name="x") # create a variable
add_op = tf.add(x, c) # create add operation
assign_op = tf.assign(x, add_op)# create assign operation
init = tf.global_variables_initializer() # op that initializes all variables
sess = tf.Session() # get session object
sess.run(init) # run the init op to initialize x
print(sess.run(x)) # should print 1.0
print(sess.run(add_op)) # should print 2.5(x+c=2.5)
print(sess.run(x)) # should print 1.0(x has not been assigned)
print(sess.run(assign_op)) # should print 2.5
print(sess.run(x)) # should print 2.5(x has been assigned)
print(sess.run(x)) # should print out 2.5
sess.close() # close session
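# Note (added): this script targets the TensorFlow 1.x graph/session API.
# On TensorFlow 2.x it would need, before the code above:
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
# since tf.assign and tf.Session are absent from the tf2 namespace.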
| [
"[email protected]"
] | |
50ed3cd85600e58486c075ad4959cb463d45996b | 57bb0379d20ffdc5fd9f4ca5b71246c1d7efdf5e | /models.py | 2140a5545279cd51f447c5684b94e0a4c9e60ba2 | [
"Apache-2.0"
] | permissive | albertounivr/deep-spatial-join | f042ec1b9abe625cc8abf8d52fa37877f328f442 | fd773054324e8a70a640811371d8f8268b720258 | refs/heads/main | 2023-02-22T13:20:41.165410 | 2021-01-23T20:39:47 | 2021-01-23T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,661 | py | from keras.models import Sequential, load_model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import concatenate
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import datasets
def create_mlp(dim, regress=False):
# define our MLP network
model = Sequential()
model.add(Dense(4, input_dim=dim, activation="relu"))
model.add(Dense(2, activation="relu"))
# check to see if the regression node should be added
if regress:
model.add(Dense(1, activation="linear"))
# return our model
return model
def create_cnn(width, height, depth, filters=(4, 8, 16), regress=False):
# initialize the input shape and channel dimension, assuming
# TensorFlow/channels-last ordering
input_shape = (height, width, depth)
chan_dim = -1
# define the model input
inputs = Input(shape=input_shape)
# loop over the number of filters
for (i, f) in enumerate(filters):
# if this is the first CONV layer then set the input
# appropriately
if i == 0:
x = inputs
# CONV => RELU => BN => POOL
x = Conv2D(f, (3, 3), padding="same")(x)
x = Activation("relu")(x)
x = BatchNormalization(axis=chan_dim)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# flatten the volume, then FC => RELU => BN => DROPOUT
x = Flatten()(x)
x = Dense(8)(x)
x = Activation("relu")(x)
x = BatchNormalization(axis=chan_dim)(x)
x = Dropout(0.5)(x)
# apply another FC layer, this one to match the number of nodes
# coming out of the MLP
x = Dense(4)(x)
x = Activation("relu")(x)
# check to see if the regression node should be added
if regress:
x = Dense(1, activation="linear")(x)
# construct the CNN
model = Model(inputs, x)
# return the CNN
return model
def run(tabular_path, histogram_path, join_result_path, model_path, model_weights_path, is_train=True):
print ('Training the join cardinality estimator')
print ('Tabular data: {}'.format(tabular_path))
print ('Histogram path: {}'.format(histogram_path))
print ('Join result data: {}'.format(join_result_path))
target = 'join_selectivity'
num_rows, num_columns = 16, 16
tabular_features_df = datasets.load_datasets_feature(tabular_path)
join_data, ds1_histograms, ds2_histograms, ds_all_histogram, ds_bops_histogram = datasets.load_join_data(
tabular_features_df, join_result_path, histogram_path, num_rows, num_columns)
num_features = len(join_data.columns) - 10
if is_train:
train_attributes, test_attributes, ds1_histograms_train, ds1_histograms_test, ds2_histograms_train, ds2_histograms_test, ds_all_histogram_train, ds_all_histogram_test, ds_bops_histogram_train, ds_bops_histogram_test = train_test_split(
join_data, ds1_histograms, ds2_histograms, ds_all_histogram, ds_bops_histogram, test_size=0.20,
random_state=42)
X_train = pd.DataFrame.to_numpy(train_attributes[[i for i in range(num_features)]])
X_test = pd.DataFrame.to_numpy(test_attributes[[i for i in range(num_features)]])
y_train = train_attributes[target]
y_test = test_attributes[target]
else:
X_test = pd.DataFrame.to_numpy(join_data[[i for i in range(num_features)]])
y_test = join_data[target]
ds_bops_histogram_test = ds_bops_histogram
mlp = create_mlp(X_test.shape[1], regress=False)
cnn1 = create_cnn(num_rows, num_columns, 1, regress=False)
# cnn2 = models.create_cnn(num_rows, num_columns, 1, regress=False)
# cnn3 = models.create_cnn(num_rows, num_columns, 1, regress=False)
# combined_input = concatenate([mlp.output, cnn1.output, cnn2.output, cnn3.output])
combined_input = concatenate([mlp.output, cnn1.output])
x = Dense(4, activation="relu")(combined_input)
x = Dense(1, activation="linear")(x)
# model = Model(inputs=[mlp.input, cnn1.input, cnn2.input, cnn3.input], outputs=x)
model = Model(inputs=[mlp.input, cnn1.input], outputs=x)
EPOCHS = 40
LR = 1e-2
opt = Adam(lr=LR, decay=LR / EPOCHS)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
if is_train:
print ('Training the model')
model.fit(
[X_train, ds_bops_histogram_train], y_train,
validation_data=([X_test, ds_bops_histogram_test], y_test),
epochs=EPOCHS, batch_size=256)
model.save(model_path)
model.save_weights(model_weights_path)
else:
print ('Loading the saved model and model weights')
model = load_model(model_path)
model.load_weights(model_weights_path)
print ('Testing')
y_pred = model.predict([X_test, ds_bops_histogram_test])
print ('r2 score: {}'.format(r2_score(y_test, y_pred)))
diff = y_pred.flatten() - y_test
percent_diff = (diff / y_test)
abs_percent_diff = np.abs(percent_diff)
# Compute the mean and standard deviation of the absolute percentage difference
mean = np.mean(abs_percent_diff)
std = np.std(abs_percent_diff)
# NOTICE: mean is the MAPE value, which is the target we want to minimize
print ('mean = {}, std = {}'.format(mean, std))
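# Usage sketch (added for illustration; the paths below are placeholders, not
# files shipped with this repo):
# run(tabular_path='data/datasets_features.csv',
#     histogram_path='data/histograms/',
#     join_result_path='data/join_results.csv',
#     model_path='models/join_model.h5',
#     model_weights_path='models/join_model_weights.h5',
#     is_train=True)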
| [
"[email protected]"
] | |
6d90a0ac27da6ecb3acd52f2b6882b65fcd0ebfe | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_whiles.py | bad0312c2f1f431ec0236df694d9d915d4795784 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
# class header
class _WHILES():
    def __init__(self,):
        self.name = "WHILES"
        self.definitions = 'while'
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['while']
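# Minimal usage sketch (illustrative):
# entry = _WHILES()
# print(entry.name, entry.definitions, entry.basic)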
| [
"[email protected]"
] | |
412c804b61fcad10d8f77cc05838e2bcba100656 | 3808e4f5c40e71d1e2e559014eaab50a8f4113cd | /api/models.py | bfda4c13d07a292f3ed5d5be09eebe50ebfcb565 | [] | no_license | Sam17SJ/apiDjango | 8488c77aad9211278d1af3c345b8087b373ba386 | 5c921d0c2b3df6a81b4f08f6a0872475fc4c1e6f | refs/heads/master | 2021-04-29T18:02:05.344677 | 2018-02-15T21:33:23 | 2018-02-15T21:33:23 | 121,684,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from django.db import models
from django.contrib.auth.models import User
# for the accounts application
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.postgres.fields import ArrayField
# end of accounts-application imports
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# Create your models here.
class Estrella(models.Model):
"""docstring for Estrella"""
rankin= models.IntegerField()
def __str__(self):
return str(self.rankin)
# class Imagen(models.Model):
# """docstring for Imagenes"""
# alto= models.IntegerField()
# ancho= models.IntegerField()
# def __init__(self, arg):
# super (Imagenes, self).__init__()
# self.arg = arg
class UsuarioGratuito(models.Model):
"""docstring for UsuarioGratuito"""
nombre= models.CharField(max_length=25)
numTelefonico = models.IntegerField()
idUser = models.ForeignKey(User)
def __str__(self):
return str(self.nombre)
class Broker(models.Model):
"""docstring for Broker"""
nombre= models.CharField(max_length=25)
estrella = models.ForeignKey(Estrella)
descripcion = models.CharField(max_length=250)
numTelefonico = models.IntegerField()
def __str__(self):
return str(self.nombre)
# class Corporacion(models.Model):
# nombre= models.CharField(max_length=25)
# estrella = models.ForeignKey(Estrella)
# descripcion = models.CharField(max_length=250)
# numTelefonico = models.IntegerField()
# """docstring for Corporacion"""
# def __init__(self, arg):
# super(Corporacion, self).__init__()
# self.arg = arg
# def user_directory_path(instance, filename):
# # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
# return 'uploads/user_{0}/{1}'.format(instance.id, filename)
class Propiedad(models.Model):
titulo = models.CharField(max_length=35)
# descripcio= models.CharField(max_length=250, blank=True)
# ubicacion = models.ForeignKey(Ubicacion)
precio = models.FloatField()
# propietario = models.ForeignKey(UsuarioGratuito, blank=True)
imagen = ArrayField(models.CharField(max_length=250, blank=True),size=10)
def __str__(self):
return str(self.titulo)
class Image(models.Model):
img = models.ImageField(upload_to='uploads/{0}'.format("%d-%m-%y/%H_%M_%S"), default='uploads/f1.png')
def __str__(self):
return str(self.img)
class Contrato(models.Model):
    """docstring for Contrato"""
    estado = models.IntegerField()
    fechaInicio = models.DateField()
    acuerdo = models.IntegerField()
    broker = models.OneToOneField(Broker)
    propiedad = models.OneToOneField(Propiedad)
def __str__(self):
return str(self.acuerdo)
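# Usage sketch (added for illustration; field values are made up):
# estrella = Estrella.objects.create(rankin=5)
# broker = Broker.objects.create(nombre='Ana', estrella=estrella,
#                                descripcion='Corredora senior', numTelefonico=5550000)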
| [
"[email protected]"
] | |
758eb2b29442850b638c5036cabcffb5d46abe6c | 0a78bfd9f0c6390cf86c39a03010000afc9864eb | /feapp/data/__all_models.py | 24ed4ed84216e6ece3aa4ab2b75bc263e891dbe3 | [] | no_license | BlueRaccoonTech/glaceon-live | b1601a647adf96e7bdd2b0f66ced70a5b3ac96ea | 78d7ccebdfbbaefc10a7f8cfdbcd2bbed0266ec8 | refs/heads/master | 2023-06-30T22:12:45.625830 | 2021-08-07T18:33:34 | 2021-08-07T18:33:34 | 262,346,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # Add all your SQLAlchemy models here.
# This allows us to import just this file when
# we need to preload the models and ensure they
# are all loaded.
# noinspection PyUnresolvedReferences
import feapp.data.users
# noinspection PyUnresolvedReferences
import feapp.data.userData
# noinspection PyUnresolvedReferences
import feapp.data.userKeys
| [
"[email protected]"
] | |
b8101c53003873718f0b8e17c85360c68a645746 | f817ce9e7a52caf0363e8dfb8c47d2700fa627e2 | /PythonApplication1/module3.py | b3a965bc2fa848d6561f47361bec26e78a3d6b90 | [] | no_license | myweblab/python-playground | 86ce204b03b512fdc8ba37acc648d79c2e0341c0 | 7281eaa230e823154c9b7967b6f075c8c9ecef62 | refs/heads/master | 2021-07-10T23:06:30.860366 | 2020-07-25T16:27:53 | 2020-07-25T16:27:53 | 179,026,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from threading import Thread
import time
def timer(name, delay, repeat):
    print("timer : " + name + " Started")
    while repeat > 0:
        time.sleep(delay)
        print(name + " : " + str(time.ctime()))
        repeat -= 1
    print("Timer {0} Completed".format(name))
t1 = Thread(target=timer, args=("Timer1",10,100))
t2 = Thread(target=timer, args=("Timer2",20,150))
t3 = Thread(target=timer, args=("Timer3",10,200))
t4= Thread(target=timer, args=("Timer4",20,250))
t1.start()
t2.start()
t3.start()
t4.start()
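# To keep the main thread from exiting before the timers finish, the threads
# can be joined (sketch):
# for t in (t1, t2, t3, t4):
#     t.join()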
| [
"[email protected]"
] | |
38e1ac549e2c738598e7f8b8a5db19f186b0d74b | fb124e51024917d6479fa626d9607ff10f7a3aba | /storm-control/storm_control/hal4000/qtWidgets/qtRecordButton.py | 8a9355923cc8f4b86fc922976debc0cd0dfdcb7d | [
"MIT"
] | permissive | BehnamAbaie/storm-control | 054bd7bbd903ed9635e4d1121c30544f58473c4f | 0c686321142eccad62ce3365eae22c3b69229b0d | refs/heads/main | 2023-06-18T08:04:01.108874 | 2021-07-14T00:51:15 | 2021-07-14T00:51:15 | 342,049,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | #!/usr/bin/env python
"""
QPushButton specialized to be a record button.
Hazen 4/17
"""
from PyQt5 import QtWidgets
import storm_control.hal4000.film.filmRequest as filmRequest
import storm_control.hal4000.halLib.halMessage as halMessage
class QtRecordButton(QtWidgets.QPushButton):
def __init__(self, parent = None, **kwds):
kwds["parent"] = parent
super().__init__(**kwds)
self.filming = False
def getHalMessage(self):
if self.filming:
return halMessage.HalMessage(source = self,
m_type = "stop film request")
else:
return halMessage.HalMessage(source = self,
m_type = "start film request",
data = {"request" : filmRequest.FilmRequest()})
def startFilm(self, film_settings):
self.setText("Stop")
self.setEnabled(not film_settings.isTCPRequest())
if film_settings.isSaved():
self.setStyleSheet("QPushButton { color: red }")
else:
self.setStyleSheet("QPushButton { color: orange }")
self.filming = True
def stopFilm(self):
self.setEnabled(True)
self.setText("Record")
self.setStyleSheet("QPushButton { color: black }")
self.filming = False
| [
"[email protected]"
] | |
8c95e3772bbb9784cb8138d5752fbd7bbd395038 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_017/ch16_2020_10_05_20_15_18_245128.py | d2bd590ba92f6842d0715be3eeff220f31237ae8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | x = float(input("Qual o valor da conta? "))
valor = (1+0.10)*x
print("Valor da conta com 10%: R$ {0:.2f}".format(valor)) | [
"[email protected]"
] | |
7b92ab44783deefca94f9bbd15a2e1ab10d82f64 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20201101/private_dns_zone_group.py | 78e2a5b2cd6fea5f4afb1338719284a7a073f414 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,488 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateDnsZoneGroupArgs', 'PrivateDnsZoneGroup']
@pulumi.input_type
class PrivateDnsZoneGroupArgs:
def __init__(__self__, *,
private_endpoint_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_dns_zone_configs: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateDnsZoneConfigArgs']]]] = None,
private_dns_zone_group_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateDnsZoneGroup resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[Sequence[pulumi.Input['PrivateDnsZoneConfigArgs']]] private_dns_zone_configs: A collection of private dns zone configurations of the private dns zone group.
:param pulumi.Input[str] private_dns_zone_group_name: The name of the private dns zone group.
"""
pulumi.set(__self__, "private_endpoint_name", private_endpoint_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_dns_zone_configs is not None:
pulumi.set(__self__, "private_dns_zone_configs", private_dns_zone_configs)
if private_dns_zone_group_name is not None:
pulumi.set(__self__, "private_dns_zone_group_name", private_dns_zone_group_name)
@property
@pulumi.getter(name="privateEndpointName")
def private_endpoint_name(self) -> pulumi.Input[str]:
"""
The name of the private endpoint.
"""
return pulumi.get(self, "private_endpoint_name")
@private_endpoint_name.setter
def private_endpoint_name(self, value: pulumi.Input[str]):
pulumi.set(self, "private_endpoint_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="privateDnsZoneConfigs")
def private_dns_zone_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateDnsZoneConfigArgs']]]]:
"""
A collection of private dns zone configurations of the private dns zone group.
"""
return pulumi.get(self, "private_dns_zone_configs")
@private_dns_zone_configs.setter
def private_dns_zone_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateDnsZoneConfigArgs']]]]):
pulumi.set(self, "private_dns_zone_configs", value)
@property
@pulumi.getter(name="privateDnsZoneGroupName")
def private_dns_zone_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private dns zone group.
"""
return pulumi.get(self, "private_dns_zone_group_name")
@private_dns_zone_group_name.setter
def private_dns_zone_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_dns_zone_group_name", value)
class PrivateDnsZoneGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_dns_zone_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateDnsZoneConfigArgs']]]]] = None,
private_dns_zone_group_name: Optional[pulumi.Input[str]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Private dns zone group resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateDnsZoneConfigArgs']]]] private_dns_zone_configs: A collection of private dns zone configurations of the private dns zone group.
:param pulumi.Input[str] private_dns_zone_group_name: The name of the private dns zone group.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateDnsZoneGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Private dns zone group resource.
:param str resource_name: The name of the resource.
:param PrivateDnsZoneGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateDnsZoneGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_dns_zone_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateDnsZoneConfigArgs']]]]] = None,
private_dns_zone_group_name: Optional[pulumi.Input[str]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateDnsZoneGroupArgs.__new__(PrivateDnsZoneGroupArgs)
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
__props__.__dict__["private_dns_zone_configs"] = private_dns_zone_configs
__props__.__dict__["private_dns_zone_group_name"] = private_dns_zone_group_name
if private_endpoint_name is None and not opts.urn:
raise TypeError("Missing required property 'private_endpoint_name'")
__props__.__dict__["private_endpoint_name"] = private_endpoint_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200301:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200401:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200501:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200601:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200701:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20200801:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20210201:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20210301:PrivateDnsZoneGroup"), pulumi.Alias(type_="azure-native:network/v20210501:PrivateDnsZoneGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateDnsZoneGroup, __self__).__init__(
'azure-native:network/v20201101:PrivateDnsZoneGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateDnsZoneGroup':
"""
Get an existing PrivateDnsZoneGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateDnsZoneGroupArgs.__new__(PrivateDnsZoneGroupArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_dns_zone_configs"] = None
__props__.__dict__["provisioning_state"] = None
return PrivateDnsZoneGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateDnsZoneConfigs")
def private_dns_zone_configs(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateDnsZoneConfigResponse']]]:
"""
A collection of private dns zone configurations of the private dns zone group.
"""
return pulumi.get(self, "private_dns_zone_configs")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private dns zone group resource.
"""
return pulumi.get(self, "provisioning_state")
| [
"[email protected]"
] | |
35b4d4fbde92b3460aca90708c121ce65a8d17d9 | 30d46864106e1d1b4e88c3552a29da8f2b8d48d3 | /otv/Transfert_Donnees/Params/Mensuel.py | 6ff49df5107b7132126d23b07b6aeff30d5f4054 | [] | no_license | nantodevison/otv | c69491f1ea68e537e9eabe1da6cec9dc8b5e35b6 | e591644d6c83945a8550bdc52292c67be9495e92 | refs/heads/master | 2023-05-12T01:54:54.528087 | 2023-04-26T12:24:55 | 2023-04-26T12:24:55 | 168,543,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
'''
Created on 18 nov. 2021
@author: martin.schoreisz
'''
dico_mois={'janv':[1,'Janv','Janvier', 'janu'], 'fevr':[2,'Fév','Février','févr','Fevrier', 'febr'], 'mars':[3,'Mars','Mars', 'marc'], 'avri':[4,'Avril','Avril', 'apri' ], 'mai':[5,'Mai','Mai', 'may'], 'juin':[6,'Juin','Juin', 'june'],
'juil':[7,'Juill','Juillet', 'juil', 'july'], 'aout':[8,'Août','Aout', 'augu'], 'sept':[9,'Sept','Septembre', 'sept'], 'octo':[10,'Oct','Octobre', 'octo'], 'nove':[11,'Nov','Novembre', 'nove'], 'dece':[12,'Déc','Décembre','Decembre', 'dece']}
def renommerMois(df):
"""
dans une df mensuelle, renommer les mois pour coller aux cles du dico_mois
in :
df :dataframe contenant des references aux mois selon les valeurs attendus dans dico_mois
out :
nouvelle df avec les noms renommes
"""
return df.rename(columns={c : k for k, v in dico_mois.items() for c in df.columns if c in v})
| [
"[email protected]"
] | |
f8dd78549cbe1632023d0aaa45a680d04ff64af2 | 34a4231a53f25cf4ac760569a2e3287d822dc312 | /example/example_24.py | aa70721470842fe967b2e1aa262c677cb204c861 | [] | no_license | galaxy-tek/python_study | 22f74110e557a86da1e5e2cfc3914710c5b56eed | 75cb14d588cab7a50744dbc19a7d93ae4b628ffe | refs/heads/master | 2022-04-17T20:45:19.437550 | 2020-04-07T14:32:16 | 2020-04-07T14:32:16 | 104,577,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7 | py | ##
##
| [
"[email protected]"
] | |
5a62c1a7a86c023c06ef0508c2747dd67883cf13 | 167cc4c25f570d285b54302d65252cb278b234c8 | /project_0/KNN_code1.py | fd63bd12a43583d91e133f3668fd4cf8f73a5788 | [] | no_license | divanshu79/k-nearest-neighbor | 011106b0d91d14ff6b64ad1faa07603fa5e9719f | 9289f109aaec95ada6a426c0652f99000b031368 | refs/heads/master | 2021-05-12T18:43:45.066714 | 2018-01-11T08:45:08 | 2018-01-11T08:45:08 | 117,072,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
import pickle
# accuracy = []
df = pd.read_csv('breast-cancer-dataseta.txt')
df.replace('?', -99999, inplace=True)
df.drop(['id'],1,inplace=True)
x = np.array(df.drop(['class'],1))
y = np.array(df['class'])
x_train,x_test,y_train,y_test = cross_validation.train_test_split(x,y,test_size=.2)
clf = neighbors.KNeighborsClassifier(n_jobs=-1)
clf.fit(x_train,y_train)
with open('KNN_code1.pickle', 'wb') as f:
pickle.dump(clf, f)
pickle_in = open('KNN_code1.pickle', 'rb')
clf = pickle.load(pickle_in)
accuracy = clf.score(x_test,y_test)
print(accuracy)
example_measure = np.array([[4,2,1,1,1,2,3,2,1],[8,9,10,7,1,4,3,2,2]])
example_measure = example_measure.reshape(len(example_measure),-1)
pridiction = clf.predict(example_measure)
# accuracy.append(accuracy)
# print(sum(accuracy)/len(accuracy))
| [
"[email protected]"
] | |
91b0f84bf061aa67f635b39ccd699dddb0873f50 | f20e87f2256203075a74fab01abfbb54e259d4b9 | /93-Arithmetic expressions.py | f08c8e9b8b15d318e542c98473837f2e8ed60f8e | [] | no_license | ekolik/-Python-Project_Euler | 8873b5268af067dee697d83ad0db3669b318a83a | 97df0443b3eeee5bcb385e495f2dc8fc1bf62691 | refs/heads/master | 2020-04-10T03:56:09.038426 | 2016-07-15T22:56:11 | 2016-07-15T22:56:11 | 30,852,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from operator import add, sub, mul, truediv
from itertools import permutations, product, combinations
mx = 0
for digitset in combinations(range(1, 10), 4):
res = set()
for item in permutations(digitset):
for oper in list(product([add, sub, mul, truediv], repeat=3)):
cand = oper[1](oper[0](item[0], item[1]), oper[2](item[2], item[3])) # (..)(..)
if cand % 1 == 0 and cand > 0:
res.add(int(cand))
cand = oper[2](oper[1](oper[0](item[0], item[1]), item[2]), item[3]) # ((..).).
if cand % 1 == 0 and cand > 0:
res.add(int(cand))
cand = oper[2](oper[0](item[0], oper[1](item[1], item[2])), item[3]) # (.(..)).
if cand % 1 == 0 and cand > 0:
res.add(int(cand))
if not(oper[0] == truediv and oper[2](oper[1](item[1], item[2]), item[3]) == 0):
cand = oper[0](item[0], oper[2](oper[1](item[1], item[2]), item[3])) # .((..).)
if cand % 1 == 0 and cand > 0:
res.add(int(cand))
if not(oper[0] == truediv and oper[1](item[1], oper[2](item[2], item[3])) == 0):
cand = oper[0](item[0], oper[1](item[1], oper[2](item[2], item[3]))) # .(.(..))
if cand % 1 == 0 and cand > 0:
res.add(int(cand))
for i in range(1, max(res)):
if i not in res:
cur_mx = i-1
break
if cur_mx > mx:
mx = cur_mx
ans = item
print(''.join(str(i) for i in sorted(ans))) | [
"[email protected]"
] | |
b528ff65f964dac95f78ba32df4088d8dc634103 | 0c358afdb06667607fb93a5b1c022328d6704b69 | /Lab2/main.py | 12daf2296b633bba67fa45950ac914387754fa5f | [] | no_license | i1red/db-5th-sem | b5ca0095d037da15c1919ec2abb534268ad06760 | 817a4b1323b56b7a6a69bc1380d4d6e5257f8f9f | refs/heads/master | 2023-01-29T00:53:24.642769 | 2020-12-15T10:22:53 | 2020-12-15T10:22:53 | 300,396,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from fastapi import FastAPI
import models
import schemas
from router_utils import create_router, create_one_to_one_router, create_one_to_many_router, create_many_to_many_router
app = FastAPI(title='Combat Sports API')
app.include_router(
create_router(
'promotions', models.Promotion, schemas.PromotionIn, schemas.PromotionOut
)
)
app.include_router(
create_one_to_one_router(
'promotions', 'promotion_president', 'promotion_presidents', models.PromotionPresident, 'promotion_id',
schemas.PromotionPresidentIn, schemas.PromotionPresidentOut
)
)
app.include_router(
create_router(
'fighters', models.Fighter, schemas.FighterIn, schemas.FighterOut
)
)
app.include_router(
create_one_to_many_router(
'promotions', 'weight_classes', models.WeightClass, 'promotion_id',
schemas.WeightClassIn, schemas.WeightClassOut
)
)
app.include_router(
create_many_to_many_router(
'weight_classes', 'fighters', models.WeightClass, models.Fighter, models.FighterWeightClass,
'weight_class_id', 'fighter_id', schemas.WeightClassOut, schemas.FighterOut,
schemas.FighterWeightClassIn, schemas.FighterWeightClassOut
)
) | [
"[email protected]"
] | |
559a5b83c5f1a5efa4675793fbc5f8a9f6a17a33 | 5b51418652ffad300585463941b4ea15775daf69 | /SimpleWay/Levels_Setup.py | 59657ceb8eac0edd0752daea06701ba911412fe0 | [] | no_license | bguernouti/LevelSystem | b7194a93674cb2858ad413d69afd97c8ac177d19 | 2db75c6b985f197f0525b5f222f14beacd3fdf13 | refs/heads/master | 2020-12-23T09:17:31.798737 | 2020-02-02T11:54:09 | 2020-02-02T11:54:09 | 237,108,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | class Level_List:
_Levels = {
1: 1000,
2: 1500,
3: 2000,
4: 2500,
5: 3000,
6: 3500,
7: 4000,
8: 4500,
9: 5000,
10: 6000,
} | [
"[email protected]"
] | |
def1d4490f9cf636707e654d2f964e7e91d57510 | 78a6163d421d95b5c07bf6d3ee979122cc97e809 | /config-tool-ice/ytproperty.py | b514cfb71a24614b28490474faed067f1a7ffcdf | [] | no_license | yuanyaru/configTool | dc65f4abb527188f8231b09dedda300fd17c16ed | ba6293de1d2a02e9a25dc8d308061566c85066ae | refs/heads/master | 2020-07-02T16:57:03.529746 | 2019-08-19T08:09:36 | 2019-08-19T08:09:36 | 201,596,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
from flask import Blueprint, request
from iceCon import ice_con
import json
import Ice
Ice.loadSlice("./ice-sqlite.ice")
import YTArea
yt_blu = Blueprint('yt', __name__)
# query (remote-adjust / YT properties)
@yt_blu.route('/yt_data', methods=['POST'])
def get_yt_property_send():
stationId = request.form.get("stationId")
station = json.loads(stationId)
DataCommand = ice_con()
status, result = DataCommand.RPCGetYTProperty(station)
ytproperty = []
for i in range(len(result)):
ytproperty.append({"id": result[i].ID, "name": result[i].name,
"describe": result[i].describe, "unit": result[i].unit,
"kval": result[i].kval, "bval": result[i].bval,
"address": result[i].address, "uplimt": result[i].uplimt,
"downlimt": result[i].downlimt})
return json.dumps(ytproperty)
# add / update (YT properties)
@yt_blu.route('/set_yt', methods=['POST'])
def set_yt_property():
DataCommand = ice_con()
stationId = request.form.get("stationId")
station = json.loads(stationId)
newyt = request.form.get("data")
YtProperty = json.loads(newyt)
ytp = []
for i in range(len(YtProperty)):
ytp.append(json.loads(YtProperty[i]))
ytproperty = []
for j in range(len(ytp[1])):
ytpstruct = YTArea.DxPropertyYT(int(ytp[0][j]), ytp[1][j].encode("utf-8"),
ytp[2][j].encode("utf-8"), ytp[3][j].encode("utf-8"),
float(ytp[4][j]), float(ytp[5][j]),
ytp[6][j].encode("utf-8"), float(ytp[7][j]),
float(ytp[8][j]))
ytproperty.append(ytpstruct)
DataCommand.RPCSetYTProperty(station, ytproperty)
return '保存成功!'
# delete (YT properties)
@yt_blu.route('/delete_yt', methods=['POST'])
def delete_yt_data():
DataCommand = ice_con()
stationId = request.form.get("stationId")
station = json.loads(stationId)
ytIDs = request.form.get("ids")
yt_IDs = json.loads(ytIDs)
pIDs = []
for i in range(len(yt_IDs)):
pIDs.append(long(yt_IDs[i]))
DataCommand.RPCDelYTProperty(station, pIDs)
return '删除成功!' | [
"[email protected]"
] | |
403ad416790d1ee65ab89443ebe669baefcb06c0 | 73c1734a5bab096ef348601beececea5adc8c5f5 | /test1.py | a67bfbedade63a30c6010f0f071d765d81ba94b0 | [] | no_license | WellingtonAmorim/testsondagem | ce362fb35caef452840be83dd0c21cceb8f382e4 | 0a348dbc47831f11cdad1b113f02a2b31572b224 | refs/heads/main | 2023-07-16T09:50:45.637522 | 2021-09-02T19:01:48 | 2021-09-02T19:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
url_list = [
"https://via.placeholder.com/400",
"https://via.placeholder.com/410",
"https://via.placeholder.com/420",
"https://via.placeholder.com/430",
"https://via.placeholder.com/440",
"https://via.placeholder.com/450",
"https://via.placeholder.com/460",
"https://via.placeholder.com/470",
"https://via.placeholder.com/480",
"https://via.placeholder.com/490",
"https://via.placeholder.com/500",
"https://via.placeholder.com/510",
"https://via.placeholder.com/520",
"https://via.placeholder.com/530",
]
def download_file(url):
html = requests.get(url, stream=True, verify=False)
print("loop")
time.sleep(2)
return html.status_code
for url in url_list:
print(download_file(url))
processes = []
with ThreadPoolExecutor(max_workers=10) as executor:
for url in url_list:
processes.append(executor.submit(download_file, url))
for task in as_completed(processes):
print(task.result())
| [
"[email protected]"
] | |
4631da8d5d4d98a49d4adf28bf4567c9c0bd866f | 1ddc74d79b2b62cbf910d2206963b778a15797c7 | /binary_search.py | 0edef03874c0fe91e3e3e3b794b46a94b0cfc8f1 | [] | no_license | ckz8780/python_algorithms | dbe064754a3610a641aea45e4006b36bb86e6c35 | 4551a179a32c41af28120efb3569799d349a7130 | refs/heads/main | 2023-02-07T07:54:45.751612 | 2020-12-31T23:44:17 | 2020-12-31T23:44:17 | 325,882,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | data = [2, 3, 5, 7, 9, 12, 15, 17, 18, 20, 23, 25, 28, 30, 33, 34, 35, 37]
target = 25
# Linear Search
def linear_search(data, target):
for i in range(len(data)):
if data[i] == target:
return True
return False
print(linear_search(data, target))
# Iterative Binary Search
# To use binary search, list MUST be SORTED!
def binary_search_iterative(data, target):
low = 0
high = len(data) - 1
while low <= high:
mid = (low + high) // 2
if target == data[mid]:
return True
elif target < data[mid]:
high = mid - 1
else:
low = mid + 1
return False
print(binary_search_iterative(data, target))
# Recursive Binary Search
def binary_search_recursive(data, target, low, high):
    if low > high:
        return False
    mid = (low + high) // 2
    if target == data[mid]:
        return True
    elif target < data[mid]:
        return binary_search_recursive(data, target, low, mid - 1)
    else:
        return binary_search_recursive(data, target, mid + 1, high)
print(binary_search_recursive(data, target, 0, len(data) - 1))
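# The standard library can do the same membership test via bisect (sketch):
# import bisect
# i = bisect.bisect_left(data, target)
# print(i < len(data) and data[i] == target)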
| [
"[email protected]"
] | |
06b826743e84279007efad50a2f248c317022896 | 3ac278b2d0e469e6e8d61da963782bccfd3a51b6 | /build/mav_state_machine_msgs/catkin_generated/generate_cached_setup.py | 09307dfe4ae74f29af5a2c2dd7fea57c4ae24c8b | [] | no_license | erl-ethz/haptic_ws | a08971c8f99a1aa5f574d9d898c189a9726a9ab6 | 8f02d35a52c08f5729ae7a489dee9e3f2bd6f110 | refs/heads/master | 2023-02-19T02:50:12.377601 | 2021-01-12T15:51:12 | 2021-01-12T15:51:12 | 325,560,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/emanuele/catkin_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/emanuele/catkin_ws/devel/.private/mav_state_machine_msgs/env.sh')
output_filename = '/home/emanuele/catkin_ws/build/mav_state_machine_msgs/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
45865e541a768afcf6947a0829b5e3b49e84c55a | bb3b31c8fd3c3e74376f05094f9436f18e4d31d0 | /src/main/c++/examples/pybullet/examples/humanoidMotionCapture.py | c823dc1de774670077c9a0e52b27d8904237b6ac | [
"MIT",
"Zlib"
] | permissive | yildiz-online/component-native-bullet | af1e08f27744287ebe5494fc58b873832a6fcc9c | 1ed2bf7a01b888622b7bea323e239e3dcd8ba634 | refs/heads/develop | 2021-01-21T03:34:34.490565 | 2020-05-02T14:43:03 | 2020-05-02T14:43:03 | 101,897,097 | 0 | 0 | MIT | 2020-05-02T14:43:04 | 2017-08-30T15:17:50 | C++ | UTF-8 | Python | false | false | 8,638 | py | import pybullet as p
import json
p.connect(p.GUI)
#p.configureDebugVisualizer(p.COV_ENABLE_Y_AXIS_UP , 1)
import pybullet_data
useMotionCapture=True
useMotionCaptureReset=not useMotionCapture
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setPhysicsEngineParameter(numSolverIterations=200)
#path = pybullet_data.getDataPath()+"/motions/humanoid3d_backflip.txt"
path = pybullet_data.getDataPath()+"/motions/humanoid3d_cartwheel.txt"
p.loadURDF("plane.urdf",[0,0,-0.03])
print("path = ", path)
with open(path, 'r') as f:
motion_dict = json.load(f)
#print("motion_dict = ", motion_dict)
print("len motion=", len(motion_dict))
print(motion_dict['Loop'])
numFrames = len(motion_dict['Frames'])
print("#frames = ", numFrames)
frameId= p.addUserDebugParameter("frame",0,numFrames-1,0)
jointTypes = ["JOINT_REVOLUTE","JOINT_PRISMATIC",
"JOINT_SPHERICAL","JOINT_PLANAR","JOINT_FIXED"]
humanoid = p.loadURDF("humanoid/humanoid.urdf", globalScaling=0.25)
for j in range (p.getNumJoints(humanoid)):
ji = p.getJointInfo(humanoid,j)
targetPosition=[0]
if (ji[2] == p.JOINT_SPHERICAL):
targetPosition=[0,0,0,1]
#p.setJointMotorControlMultiDof(humanoid,j,p.POSITION_CONTROL,targetPosition, force=0)
#print(ji)
print("joint[",j,"].type=",jointTypes[ji[2]])
print("joint[",j,"].name=",ji[1])
jointIds=[]
paramIds=[]
for j in range (p.getNumJoints(humanoid)):
p.changeDynamics(humanoid,j,linearDamping=0, angularDamping=0)
p.changeVisualShape(humanoid,j,rgbaColor=[1,1,1,1])
info = p.getJointInfo(humanoid,j)
#print(info)
if (not useMotionCapture):
jointName = info[1]
jointType = info[2]
if (jointType==p.JOINT_PRISMATIC or jointType==p.JOINT_REVOLUTE):
jointIds.append(j)
paramIds.append(p.addUserDebugParameter(jointName.decode("utf-8"),-4,4,0))
print("jointName=",jointName, "at ", j)
p.changeVisualShape(humanoid,2,rgbaColor=[1,0,0,1])
chest=1
neck=2
rightShoulder=3
rightElbow=4
leftShoulder=6
leftElbow = 7
rightHip = 9
rightKnee=10
rightAnkle=11
leftHip = 12
leftKnee=13
leftAnkle=14
import time
once=True
p.getCameraImage(320,200)
maxForce=1000
while (p.isConnected()):
frameReal = p.readUserDebugParameter(frameId)
frame = int(frameReal)
frameNext = frame+1
if (frameNext >= numFrames):
frameNext = frame
frameFraction = frameReal - frame
#print("frameFraction=",frameFraction)
#print("frame=",frame)
#print("frameNext=", frameNext)
#getQuaternionSlerp
frameData = motion_dict['Frames'][frame]
frameDataNext = motion_dict['Frames'][frameNext]
#print("duration=",frameData[0])
#print(pos=[frameData])
basePos1Start = [frameData[1],frameData[2],frameData[3]]
basePos1End = [frameDataNext[1],frameDataNext[2],frameDataNext[3]]
basePos1 = [basePos1Start[0]+frameFraction*(basePos1End[0]-basePos1Start[0]),
basePos1Start[1]+frameFraction*(basePos1End[1]-basePos1Start[1]),
basePos1Start[2]+frameFraction*(basePos1End[2]-basePos1Start[2])]
baseOrn1Start = [frameData[5],frameData[6], frameData[7],frameData[4]]
baseOrn1Next = [frameDataNext[5],frameDataNext[6], frameDataNext[7],frameDataNext[4]]
baseOrn1 = p.getQuaternionSlerp(baseOrn1Start,baseOrn1Next,frameFraction)
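  # getQuaternionSlerp interpolates on the unit sphere, so the blend between
  # the two mocap frames stays a valid unit quaternion (componentwise linear
  # interpolation would not)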
#pre-rotate to make z-up
y2zPos=[0,0,0.0]
y2zOrn = p.getQuaternionFromEuler([1.57,0,0])
basePos,baseOrn = p.multiplyTransforms(y2zPos, y2zOrn,basePos1,baseOrn1)
p.resetBasePositionAndOrientation(humanoid, basePos,baseOrn)
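  # the mocap clip is y-up while pybullet's world is z-up, so the pose is
  # pre-rotated +90 degrees about x (y2zOrn above) before being applied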
chestRotStart = [frameData[9],frameData[10],frameData[11],frameData[8]]
chestRotEnd = [frameDataNext[9],frameDataNext[10],frameDataNext[11],frameDataNext[8]]
chestRot = p.getQuaternionSlerp(chestRotStart,chestRotEnd,frameFraction)
neckRotStart = [frameData[13],frameData[14],frameData[15],frameData[12]]
neckRotEnd= [frameDataNext[13],frameDataNext[14],frameDataNext[15],frameDataNext[12]]
neckRot = p.getQuaternionSlerp(neckRotStart,neckRotEnd,frameFraction)
rightHipRotStart = [frameData[17],frameData[18],frameData[19],frameData[16]]
rightHipRotEnd = [frameDataNext[17],frameDataNext[18],frameDataNext[19],frameDataNext[16]]
rightHipRot = p.getQuaternionSlerp(rightHipRotStart,rightHipRotEnd,frameFraction)
rightKneeRotStart = [frameData[20]]
rightKneeRotEnd = [frameDataNext[20]]
rightKneeRot = [rightKneeRotStart[0]+frameFraction*(rightKneeRotEnd[0]-rightKneeRotStart[0])]
rightAnkleRotStart = [frameData[22],frameData[23],frameData[24],frameData[21]]
rightAnkleRotEnd = [frameDataNext[22],frameDataNext[23],frameDataNext[24],frameDataNext[21]]
rightAnkleRot = p.getQuaternionSlerp(rightAnkleRotStart,rightAnkleRotEnd,frameFraction)
rightShoulderRotStart = [frameData[26],frameData[27],frameData[28],frameData[25]]
rightShoulderRotEnd = [frameDataNext[26],frameDataNext[27],frameDataNext[28],frameDataNext[25]]
rightShoulderRot = p.getQuaternionSlerp(rightShoulderRotStart,rightShoulderRotEnd,frameFraction)
rightElbowRotStart = [frameData[29]]
rightElbowRotEnd = [frameDataNext[29]]
rightElbowRot = [rightElbowRotStart[0]+frameFraction*(rightElbowRotEnd[0]-rightElbowRotStart[0])]
leftHipRotStart = [frameData[31],frameData[32],frameData[33],frameData[30]]
leftHipRotEnd = [frameDataNext[31],frameDataNext[32],frameDataNext[33],frameDataNext[30]]
leftHipRot = p.getQuaternionSlerp(leftHipRotStart,leftHipRotEnd,frameFraction)
leftKneeRotStart = [frameData[34]]
leftKneeRotEnd = [frameDataNext[34]]
leftKneeRot = [leftKneeRotStart[0] +frameFraction*(leftKneeRotEnd[0]-leftKneeRotStart[0]) ]
leftAnkleRotStart = [frameData[36],frameData[37],frameData[38],frameData[35]]
leftAnkleRotEnd = [frameDataNext[36],frameDataNext[37],frameDataNext[38],frameDataNext[35]]
leftAnkleRot = p.getQuaternionSlerp(leftAnkleRotStart,leftAnkleRotEnd,frameFraction)
leftShoulderRotStart = [frameData[40],frameData[41],frameData[42],frameData[39]]
leftShoulderRotEnd = [frameDataNext[40],frameDataNext[41],frameDataNext[42],frameDataNext[39]]
leftShoulderRot = p.getQuaternionSlerp(leftShoulderRotStart,leftShoulderRotEnd,frameFraction)
leftElbowRotStart = [frameData[43]]
leftElbowRotEnd = [frameDataNext[43]]
leftElbowRot = [leftElbowRotStart[0]+frameFraction*(leftElbowRotEnd[0]-leftElbowRotStart[0])]
#print("chestRot=",chestRot)
p.setGravity(0,0,0)
kp=1
if (useMotionCapture):
p.setJointMotorControlMultiDof(humanoid,chest,p.POSITION_CONTROL, targetPosition=chestRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,neck,p.POSITION_CONTROL,targetPosition=neckRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,rightHip,p.POSITION_CONTROL,targetPosition=rightHipRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,rightKnee,p.POSITION_CONTROL,targetPosition=rightKneeRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,rightAnkle,p.POSITION_CONTROL,targetPosition=rightAnkleRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,rightShoulder,p.POSITION_CONTROL,targetPosition=rightShoulderRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,rightElbow, p.POSITION_CONTROL,targetPosition=rightElbowRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,leftHip, p.POSITION_CONTROL,targetPosition=leftHipRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,leftKnee, p.POSITION_CONTROL,targetPosition=leftKneeRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,leftAnkle, p.POSITION_CONTROL,targetPosition=leftAnkleRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,leftShoulder, p.POSITION_CONTROL,targetPosition=leftShoulderRot,positionGain=kp, force=maxForce)
p.setJointMotorControlMultiDof(humanoid,leftElbow, p.POSITION_CONTROL,targetPosition=leftElbowRot,positionGain=kp, force=maxForce)
if (useMotionCaptureReset):
p.resetJointStateMultiDof(humanoid,chest,chestRot)
p.resetJointStateMultiDof(humanoid,neck,neckRot)
p.resetJointStateMultiDof(humanoid,rightHip,rightHipRot)
p.resetJointStateMultiDof(humanoid,rightKnee,rightKneeRot)
p.resetJointStateMultiDof(humanoid,rightAnkle,rightAnkleRot)
p.resetJointStateMultiDof(humanoid,rightShoulder,rightShoulderRot)
p.resetJointStateMultiDof(humanoid,rightElbow, rightElbowRot)
p.resetJointStateMultiDof(humanoid,leftHip, leftHipRot)
p.resetJointStateMultiDof(humanoid,leftKnee, leftKneeRot)
p.resetJointStateMultiDof(humanoid,leftAnkle, leftAnkleRot)
p.resetJointStateMultiDof(humanoid,leftShoulder, leftShoulderRot)
p.resetJointStateMultiDof(humanoid,leftElbow, leftElbowRot)
p.stepSimulation()
#time.sleep(1./240.)
| [
"[email protected]"
] | |
50faeae0894485df5c3a03fa5afe161265b94cc9 | 33fea8d6ca343044796366d9e489b2eed9b1b70d | /Homework/a3/utils/parser_utils.py | 22f7f7035f3260553b2f48d2d221e751dfafc3af | [
"MIT"
] | permissive | LFhase/Learning_CS224N | 25b347c2c93f7d02e802ee8fde093b57245b3e1a | 21af6dd4f7b9dcb3f34aac9c2cebf4a02a17176f | refs/heads/master | 2021-01-16T02:18:54.949755 | 2020-09-23T02:55:46 | 2020-09-23T02:55:46 | 242,939,774 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,808 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 3
parser_utils.py: Utilities for training the dependency parser.
Sahil Chopra <[email protected]>
"""
import time
import os
import logging
from collections import Counter
from .general_utils import get_minibatches
from parser_transitions import minibatch_parse
from tqdm import tqdm
import torch
import numpy as np
P_PREFIX = '<p>:'
L_PREFIX = '<l>:'
UNK = '<UNK>'
NULL = '<NULL>'
ROOT = '<ROOT>'
class Config(object):
language = 'english'
with_punct = True
unlabeled = True
lowercase = True
use_pos = True
use_dep = True
use_dep = use_dep and (not unlabeled)
data_path = './data'
train_file = 'train.conll'
dev_file = 'dev.conll'
test_file = 'test.conll'
embedding_file = './data/en-cw.txt'
class Parser(object):
"""Contains everything needed for transition-based dependency parsing except for the model"""
def __init__(self, dataset):
root_labels = list([l for ex in dataset for (h, l) in zip(ex['head'], ex['label']) if h == 0])
counter = Counter(root_labels)
if len(counter) > 1:
logging.info('Warning: more than one root label')
logging.info(counter)
self.root_label = counter.most_common()[0][0]
deprel = [self.root_label] + list(set([w for ex in dataset for w in ex['label'] if w != self.root_label]))
tok2id = {L_PREFIX + l: i for (i, l) in enumerate(deprel)}
tok2id[L_PREFIX + NULL] = self.L_NULL = len(tok2id)
config = Config()
self.unlabeled = config.unlabeled
self.with_punct = config.with_punct
self.use_pos = config.use_pos
self.use_dep = config.use_dep
self.language = config.language
if self.unlabeled:
trans = ['L', 'R', 'S']
self.n_deprel = 1
else:
trans = ['L-' + l for l in deprel] + ['R-' + l for l in deprel] + ['S']
self.n_deprel = len(deprel)
self.n_trans = len(trans)
self.tran2id = {t: i for (i, t) in enumerate(trans)}
self.id2tran = {i: t for (i, t) in enumerate(trans)}
# logging.info('Build dictionary for part-of-speech tags.')
tok2id.update(build_dict([P_PREFIX + w for ex in dataset for w in ex['pos']], offset=len(tok2id)))
tok2id[P_PREFIX + UNK] = self.P_UNK = len(tok2id)
tok2id[P_PREFIX + NULL] = self.P_NULL = len(tok2id)
tok2id[P_PREFIX + ROOT] = self.P_ROOT = len(tok2id)
# logging.info('Build dictionary for words.')
tok2id.update(build_dict([w for ex in dataset for w in ex['word']], offset=len(tok2id)))
tok2id[UNK] = self.UNK = len(tok2id)
tok2id[NULL] = self.NULL = len(tok2id)
tok2id[ROOT] = self.ROOT = len(tok2id)
self.tok2id = tok2id
self.id2tok = {v: k for (k, v) in tok2id.items()}
self.n_features = 18 + (18 if config.use_pos else 0) + (12 if config.use_dep else 0)
self.n_tokens = len(tok2id)
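        # Feature layout: 18 word ids (top 3 stack items, first 3 buffer items,
        # plus 12 children/grandchildren of the two topmost stack items),
        # mirrored by 18 POS ids, plus 12 dependency-label ids when labeled.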
def vectorize(self, examples):
vec_examples = []
for ex in examples:
word = [self.ROOT] + [self.tok2id[w] if w in self.tok2id else self.UNK for w in ex['word']]
pos = [self.P_ROOT
] + [self.tok2id[P_PREFIX + w] if P_PREFIX + w in self.tok2id else self.P_UNK for w in ex['pos']]
head = [-1] + ex['head']
label = [-1] + [self.tok2id[L_PREFIX + w] if L_PREFIX + w in self.tok2id else -1 for w in ex['label']]
vec_examples.append({'word': word, 'pos': pos, 'head': head, 'label': label})
return vec_examples
def extract_features(self, stack, buf, arcs, ex):
if stack[0] == "ROOT":
stack[0] = 0
def get_lc(k):
return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] < k])
def get_rc(k):
return sorted([arc[1] for arc in arcs if arc[0] == k and arc[1] > k], reverse=True)
p_features = []
l_features = []
features = [self.NULL] * (3 - len(stack)) + [ex['word'][x] for x in stack[-3:]]
features += [ex['word'][x] for x in buf[:3]] + [self.NULL] * (3 - len(buf))
if self.use_pos:
p_features = [self.P_NULL] * (3 - len(stack)) + [ex['pos'][x] for x in stack[-3:]]
p_features += [ex['pos'][x] for x in buf[:3]] + [self.P_NULL] * (3 - len(buf))
for i in range(2):
if i < len(stack):
k = stack[-i - 1]
lc = get_lc(k)
rc = get_rc(k)
llc = get_lc(lc[0]) if len(lc) > 0 else []
rrc = get_rc(rc[0]) if len(rc) > 0 else []
features.append(ex['word'][lc[0]] if len(lc) > 0 else self.NULL)
features.append(ex['word'][rc[0]] if len(rc) > 0 else self.NULL)
features.append(ex['word'][lc[1]] if len(lc) > 1 else self.NULL)
features.append(ex['word'][rc[1]] if len(rc) > 1 else self.NULL)
features.append(ex['word'][llc[0]] if len(llc) > 0 else self.NULL)
features.append(ex['word'][rrc[0]] if len(rrc) > 0 else self.NULL)
if self.use_pos:
p_features.append(ex['pos'][lc[0]] if len(lc) > 0 else self.P_NULL)
p_features.append(ex['pos'][rc[0]] if len(rc) > 0 else self.P_NULL)
p_features.append(ex['pos'][lc[1]] if len(lc) > 1 else self.P_NULL)
p_features.append(ex['pos'][rc[1]] if len(rc) > 1 else self.P_NULL)
p_features.append(ex['pos'][llc[0]] if len(llc) > 0 else self.P_NULL)
p_features.append(ex['pos'][rrc[0]] if len(rrc) > 0 else self.P_NULL)
if self.use_dep:
l_features.append(ex['label'][lc[0]] if len(lc) > 0 else self.L_NULL)
l_features.append(ex['label'][rc[0]] if len(rc) > 0 else self.L_NULL)
l_features.append(ex['label'][lc[1]] if len(lc) > 1 else self.L_NULL)
l_features.append(ex['label'][rc[1]] if len(rc) > 1 else self.L_NULL)
l_features.append(ex['label'][llc[0]] if len(llc) > 0 else self.L_NULL)
l_features.append(ex['label'][rrc[0]] if len(rrc) > 0 else self.L_NULL)
else:
features += [self.NULL] * 6
if self.use_pos:
p_features += [self.P_NULL] * 6
if self.use_dep:
l_features += [self.L_NULL] * 6
features += p_features + l_features
assert len(features) == self.n_features
return features
def get_oracle(self, stack, buf, ex):
if len(stack) < 2:
return self.n_trans - 1
i0 = stack[-1]
i1 = stack[-2]
h0 = ex['head'][i0]
h1 = ex['head'][i1]
l0 = ex['label'][i0]
l1 = ex['label'][i1]
if self.unlabeled:
if (i1 > 0) and (h1 == i0):
return 0
elif (i1 >= 0) and (h0 == i1) and \
(not any([x for x in buf if ex['head'][x] == i0])):
return 1
else:
return None if len(buf) == 0 else 2
else:
if (i1 > 0) and (h1 == i0):
return l1 if (l1 >= 0) and (l1 < self.n_deprel) else None
elif (i1 >= 0) and (h0 == i1) and \
(not any([x for x in buf if ex['head'][x] == i0])):
return l0 + self.n_deprel if (l0 >= 0) and (l0 < self.n_deprel) else None
else:
return None if len(buf) == 0 else self.n_trans - 1
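    # Transition ids: 0..n_deprel-1 are LEFT-ARCs (one per label),
    # n_deprel..2*n_deprel-1 are RIGHT-ARCs, and the last id is SHIFT;
    # in the unlabeled setting this collapses to [L, R, S] = [0, 1, 2].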
def create_instances(self, examples):
all_instances = []
succ = 0
for id, ex in enumerate(examples):
n_words = len(ex['word']) - 1
# arcs = {(h, t, label)}
stack = [0]
buf = [i + 1 for i in range(n_words)]
arcs = []
instances = []
for i in range(n_words * 2):
gold_t = self.get_oracle(stack, buf, ex)
if gold_t is None:
break
legal_labels = self.legal_labels(stack, buf)
assert legal_labels[gold_t] == 1
instances.append((self.extract_features(stack, buf, arcs, ex), legal_labels, gold_t))
if gold_t == self.n_trans - 1:
stack.append(buf[0])
buf = buf[1:]
elif gold_t < self.n_deprel:
arcs.append((stack[-1], stack[-2], gold_t))
stack = stack[:-2] + [stack[-1]]
else:
arcs.append((stack[-2], stack[-1], gold_t - self.n_deprel))
stack = stack[:-1]
else:
succ += 1
all_instances += instances
return all_instances
def legal_labels(self, stack, buf):
labels = ([1] if len(stack) > 2 else [0]) * self.n_deprel
labels += ([1] if len(stack) >= 2 else [0]) * self.n_deprel
labels += [1] if len(buf) > 0 else [0]
return labels
def parse(self, dataset, eval_batch_size=5000):
sentences = []
sentence_id_to_idx = {}
for i, example in enumerate(dataset):
n_words = len(example['word']) - 1
sentence = [j + 1 for j in range(n_words)]
sentences.append(sentence)
sentence_id_to_idx[id(sentence)] = i
model = ModelWrapper(self, dataset, sentence_id_to_idx)
dependencies = minibatch_parse(sentences, model, eval_batch_size)
UAS = all_tokens = 0.0
with tqdm(total=len(dataset)) as prog:
for i, ex in enumerate(dataset):
head = [-1] * len(ex['word'])
                for h, t in dependencies[i]:
head[t] = h
for pred_h, gold_h, gold_l, pos in \
zip(head[1:], ex['head'][1:], ex['label'][1:], ex['pos'][1:]):
assert self.id2tok[pos].startswith(P_PREFIX)
pos_str = self.id2tok[pos][len(P_PREFIX):]
if (self.with_punct) or (not punct(self.language, pos_str)):
UAS += 1 if pred_h == gold_h else 0
all_tokens += 1
                prog.update(1)
UAS /= all_tokens
return UAS, dependencies
class ModelWrapper(object):
def __init__(self, parser, dataset, sentence_id_to_idx):
self.parser = parser
self.dataset = dataset
self.sentence_id_to_idx = sentence_id_to_idx
def predict(self, partial_parses):
mb_x = [
self.parser.extract_features(p.stack, p.buffer, p.dependencies,
self.dataset[self.sentence_id_to_idx[id(p.sentence)]]) for p in partial_parses
]
mb_x = np.array(mb_x).astype('int32')
mb_x = torch.from_numpy(mb_x).long()
mb_l = [self.parser.legal_labels(p.stack, p.buffer) for p in partial_parses]
pred = self.parser.model(mb_x)
pred = pred.detach().numpy()
pred = np.argmax(pred + 10000 * np.array(mb_l).astype('float32'), 1)
pred = ["S" if p == 2 else ("LA" if p == 0 else "RA") for p in pred]
return pred
def read_conll(in_file, lowercase=False, max_example=None):
examples = []
with open(in_file) as f:
word, pos, head, label = [], [], [], []
for line in f.readlines():
sp = line.strip().split('\t')
if len(sp) == 10:
if '-' not in sp[0]:
word.append(sp[1].lower() if lowercase else sp[1])
pos.append(sp[4])
head.append(int(sp[6]))
label.append(sp[7])
elif len(word) > 0:
examples.append({'word': word, 'pos': pos, 'head': head, 'label': label})
word, pos, head, label = [], [], [], []
if (max_example is not None) and (len(examples) == max_example):
break
if len(word) > 0:
examples.append({'word': word, 'pos': pos, 'head': head, 'label': label})
return examples
def build_dict(keys, n_max=None, offset=0):
count = Counter()
for key in keys:
count[key] += 1
ls = count.most_common() if n_max is None \
else count.most_common(n_max)
return {w[0]: index + offset for (index, w) in enumerate(ls)}
def punct(language, pos):
if language == 'english':
return pos in ["''", ",", ".", ":", "``", "-LRB-", "-RRB-"]
elif language == 'chinese':
return pos == 'PU'
elif language == 'french':
return pos == 'PUNC'
elif language == 'german':
return pos in ["$.", "$,", "$["]
elif language == 'spanish':
# http://nlp.stanford.edu/software/spanish-faq.shtml
return pos in [
"f0", "faa", "fat", "fc", "fd", "fe", "fg", "fh", "fia", "fit", "fp", "fpa", "fpt", "fs", "ft", "fx", "fz"
]
elif language == 'universal':
return pos == 'PUNCT'
else:
raise ValueError('language: %s is not supported.' % language)
def minibatches(data, batch_size):
x = np.array([d[0] for d in data])
y = np.array([d[2] for d in data])
one_hot = np.zeros((y.size, 3))
one_hot[np.arange(y.size), y] = 1
return get_minibatches([x, one_hot], batch_size)
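    # The gold transition ids are one-hot encoded over the 3 unlabeled
    # classes (L, R, S) so each minibatch can feed a cross-entropy loss
    # directly.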
def load_and_preprocess_data(reduced=True):
config = Config()
print("Loading data...",)
start = time.time()
train_set = read_conll(os.path.join(config.data_path, config.train_file), lowercase=config.lowercase)
dev_set = read_conll(os.path.join(config.data_path, config.dev_file), lowercase=config.lowercase)
test_set = read_conll(os.path.join(config.data_path, config.test_file), lowercase=config.lowercase)
if reduced:
train_set = train_set[:1000]
dev_set = dev_set[:500]
test_set = test_set[:500]
print("took {:.2f} seconds".format(time.time() - start))
print("Building parser...",)
start = time.time()
parser = Parser(train_set)
print("took {:.2f} seconds".format(time.time() - start))
print("Loading pretrained embeddings...",)
start = time.time()
word_vectors = {}
for line in open(config.embedding_file).readlines():
sp = line.strip().split()
word_vectors[sp[0]] = [float(x) for x in sp[1:]]
embeddings_matrix = np.asarray(np.random.normal(0, 0.9, (parser.n_tokens, 50)), dtype='float32')
for token in parser.tok2id:
i = parser.tok2id[token]
if token in word_vectors:
embeddings_matrix[i] = word_vectors[token]
elif token.lower() in word_vectors:
embeddings_matrix[i] = word_vectors[token.lower()]
print("took {:.2f} seconds".format(time.time() - start))
print("Vectorizing data...",)
start = time.time()
train_set = parser.vectorize(train_set)
dev_set = parser.vectorize(dev_set)
test_set = parser.vectorize(test_set)
print("took {:.2f} seconds".format(time.time() - start))
print("Preprocessing training data...",)
start = time.time()
train_examples = parser.create_instances(train_set)
print("took {:.2f} seconds".format(time.time() - start))
return parser, embeddings_matrix, train_examples, dev_set, test_set,
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
bf06e74cf8597dd2a18aa2da1089aceb7d28c43d | 4a91c38d50d9c8ba5ff2c1adcc3a611611788733 | /data_analysis_forThesis.py | f1cdbcfc58e344275bc9ee8c97ce9d72c51949ae | [] | no_license | heewonpark/DataAnalysis | 944d33b61532ae37d19ec31bed9140397014cd51 | 9b0dfb1d58b135f03bc8f2e6d8d5bd6952479e98 | refs/heads/master | 2021-01-21T12:06:56.475481 | 2016-03-19T06:31:33 | 2016-03-19T06:31:33 | 28,217,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,193 | py | #! /usr/bin/python
# coding: UTF-8
##############################################
# This file is written by Park
# Edited in 2015.03.23
###############################################
import stfio
import numpy as np
import matplotlib.pyplot as plt
import math
import os.path
#import seaborn as sns
from SpikeDetector import spikedetector
#readfilelist = open("ListOfData.txt",'r')
readfilelist = open("listoffiles2.txt",'r')
filenames = readfilelist.readlines()
print filenames
readfilelist.close()
RANGE = 170
w_step_time = []
w_step_freq = [0 for _ in range(RANGE)]
DATABIN = 0.1
AVER_STIM = 6.0
# splist2: per-sample spike indicator list, dt = rec.dt, Bin = samples per DATABIN bin, timeWidth = limit (total number of samples)
def analysis_by_bin(wname,splist2,Bin, steps, delay, dt, timeWidth,sstart,send) :
step_freq = [0 for _ in range(steps+1)]
step_num = [0 for _ in range(steps+1)]
step_time = [0 for _ in range(steps+1)]
l_step_freq = [0 for _ in range(RANGE)]
for j in range(steps+1):
step_time[j] = j*Bin*dt
if (j+1)*Bin >= timeWidth:
for i in range(j*Bin, timeWidth):
if splist2[i] == 1:
step_num[j] +=1
else:
for i in range(j*Bin, (j+1)*Bin):
if splist2[i] == 1:
step_num[j] +=1
for j in range(steps+1):
step_freq[j] = step_num[j]/DATABIN
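    # spikes-per-bin divided by the 0.1 s bin width (DATABIN) turns the
    # counts into firing rates in Hz for the PSTH plotted below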
#fig2 =plt.figure()
fig2 =plt.figure(figsize=(12,8),dpi=400)
plt.bar(step_time, step_freq,width = 0.1,color='0.2')
plt.plot([sstart,send],[175,175],linewidth=8,color='k')
plt.xlabel('Time[s]',fontsize=20)
plt.xlim(0,15)
plt.ylim(0,180)
plt.ylabel('Frequency[Hz]',fontsize=20)
plt.title('PSTH',fontsize=20)
plt.savefig("./graph/"+wname+"bin.png")
plt.close()
bin_file = open("./analyzed_data/"+wname+"bin.dat",'w')
bin_file.writelines(repr(steps)+' '+repr(delay)+'\n')
for j in range(steps+1):
# print 'j '+repr(j)
w_step_freq[j+10+delay] +=step_freq[j]
l_step_freq[j+10+delay] = step_freq[j]
for k in range(RANGE):
bin_file.writelines(repr(w_step_time[k]) + '\t' + repr(l_step_freq[k])+'\n')
bin_file.close()
wholedata = open('./analyzed_data/ifreq_whole.dat','w')
def data_analysis(string) :
    #The stimulus start time needs correcting (set to 0 for all files for now)
filename, starttime = string.split(' ')
rec = stfio.read(filename)
#rec = stfio.read("300ng.atf")
print 'filename '+filename+' start time '+ starttime
filename = os.path.basename(filename)
writename = filename.rstrip('.atf')
print writename, filename
f = open("./analyzed_data/"+writename + '.dat','w')
    histogram = open("./analyzed_data/"+writename + 'histogram.dat','w') # for the histogram: records the peak amplitude of every spike
freqdata = open("./analyzed_data/"+writename + 'ifreq.dat','w')
clamp = rec[0][0]
voltage = rec[0][2]
limit = rec[0][0].asarray().shape[0]
flg=0
for i in range(limit):
if i==0:
pass
elif(voltage[i-1]-0.2)*(voltage[i]-0.2)<0:
if flg==0:
stim_start = i*rec.dt
flg +=1
elif flg==1:
stim_end = i*rec.dt
flg = 0
flg=0
time = np.arange(0.0, limit, 1.0) * rec.dt
threshold_c = 0.75 #for cclamp
threshold_v = 0.2 #for voltage IN2
spike_cnt = 0 #spike number counter
bs_cnt = 0 #before stimulus spike number counter
as_cnt = 0 #after stimulus spike number counter
s_start = 0 # start of stimulation by Voltage
s_end = 0 # end of stimulation by Voltage
s_flg = 0
in_spike = False
    spikelist = [] # sample indices of spike peaks; spikelist[i]*rec.dt = spike time
    spikelist2 = [] # same length as clamp and voltage; spikelist2[i]=1 at a spike peak, otherwise 0
rs_start = 0 # real start of stimulation calcuate by spike frequency
rs_start_flg = 0
if_2 = [] #instantaneus frequency, 2 spike
if_2t = [] #instantaneus frequency time
if_3 = [] #instantaneus frequency, 3 spike
if_3t = [] #instantaneus frequency time
if_4 = [] #instantaneus frequency, 4 spike
if_4t = [] #instantaneus frequency time
sec1spike = 0
sec2spike = 0
print rec.dt
print "time "
print rec.dt * limit
f.writelines('#Whole time :'+repr(rec.dt * limit)+'\n')
for i in range(limit):
if i ==0:
pass
elif (voltage[i-1]-threshold_v)*(voltage[i]-threshold_v)<0:
if s_flg == 0:
# print(i)
s_start = i
s_flg +=1
elif s_flg > 0:
s_end = i
print 's_start : ' + repr(s_start) + ' s_end : ' +repr(s_end) + ' i : ' + repr(i)
print (limit)
i = 0
spikelist2 =[0 for _ in range(limit)]
for i in range(limit):
if i == 0:
pass
elif (clamp[i-1] - threshold_c) * (clamp[i] - threshold_c) < 0:
spike_cnt +=1
if in_spike == True:
in_spike = False
elif in_spike == False:
in_spike = True
if (in_spike == True) & (i != limit-1):
if(clamp[i-1]<clamp[i]) & (clamp[i]>clamp[i+1]):
spikelist.append(i)
spikelist2[i] = 1
# print(i)
# print (i)
##Print spike timing
spfile = open("./analyzed_data/"+writename + 'spiketiming.dat','w')
for i in range(len(spikelist)):
spfile.writelines(repr(spikelist[i]*rec.dt)+'\n')
spfile.close()
##Print spike timing end
i=0
for i in range(limit):
if (i <limit-1) &(i>0) :
if(clamp[i-1]-clamp[i]) * (clamp[i]-clamp[i+1]) < 0:
histogram.writelines(repr(clamp[i])+'\n')
histogram.close()
print s_start
print spike_cnt
f.writelines('#Number of Spike :'+repr(spike_cnt/2)+'\n')
for j in range(len(spikelist)):
if spikelist[j]>s_start:
if j< len(spikelist)-3 :
if (spikelist[j+3]-spikelist[j])<10000:
if rs_start_flg ==0 :
rs_start = spikelist[j]
rs_start_flg +=1
print 'rs_start : ' + repr(rs_start) +' '+ repr(rs_start*rec.dt)+'s'
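    # onset heuristic: rs_start is the first post-stimulus spike followed by
    # three more spikes within 10000 samples, i.e. the start of a sustained burst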
f.writelines('#Start time of stimulation : ' + repr(rs_start)+'\n')
i=0
for i in range(len(spikelist)):
if i<len(spikelist)-1:
if spikelist[i]-rs_start==0:
bs_cnt = i
f.writelines('#bs_cnt :' + repr(bs_cnt)+'\n')
for j in range(len(spikelist)):
if (spikelist[j]-rs_start>=0)&(spikelist[j]-rs_start<(1/rec.dt)):
sec1spike += 1
for j in range(len(spikelist)):
if (spikelist[j]-rs_start>=0)&(spikelist[j]-rs_start<(2/rec.dt)):
sec2spike += 1
f.writelines('#1sec spike : ' + repr(sec1spike)+'\n')
f.writelines('#2sec spike : ' + repr(sec2spike)+'\n')
i=0
freqdata.writelines(repr(len(spikelist)-1-bs_cnt)+'\n')
for i in range(len(spikelist)):
if i< len(spikelist)-1:
a = 1 / ((spikelist[i+1]-spikelist[i])*rec.dt)
at = (spikelist[i+1]+spikelist[i])*0.5*rec.dt
#print repr(a)
"""
if a>200:
a=200
"""
f.writelines(repr(i)+'\t'+repr(spikelist[i])+'\t'+repr(spikelist[i+1])+'\t')
f.writelines(repr(at)+ '\t'+repr(a)+'\n')
if bs_cnt<=i:
freqdata.writelines(repr(at)+ '\t'+repr(a)+'\n')
if_2.append(a)
if_2t.append(at)
i=0
for i in range(len(spikelist)):
if i< len(spikelist)-2:
b = 2/ ((spikelist[i+2] - spikelist[i])*rec.dt)
bt = (spikelist[i+2]+spikelist[i+1]+spikelist[i])/3*rec.dt
"""
if b>200:
b=200
"""
f.writelines(repr(bt)+'\t'+repr(b)+'\n')
if_3.append(b)
if_3t.append(bt)
f.writelines('\n\n')
i=0
for i in range(len(spikelist)):
if i< len(spikelist)-3:
c = 3/ ((spikelist[i+3] - spikelist[i])*rec.dt)
ct = (spikelist[i+3]+spikelist[i+2]+spikelist[i+1]+spikelist[i])/4*rec.dt
"""
if c>200:
c=200
"""
f.writelines(repr(ct)+'\t'+repr(c)+'\n')
if_4.append(c)
if_4t.append(ct)
for i in range(len(spikelist)):
if i< len(spikelist)-5:
d = 5/ ((spikelist[i+5] - spikelist[i])*rec.dt)
dt = (spikelist[i+5]+spikelist[i+4]+spikelist[i+3]+spikelist[i+2]+spikelist[i+1]+spikelist[i])/6*rec.dt
if bs_cnt<=i:
wholedata.writelines(repr(dt)+ '\t'+repr(d)+'\n')
Bin = int(DATABIN/rec.dt)
steps = int(limit/Bin)
delay = int(starttime.strip('\n'))
print 'delay ' +repr(delay)+'steps '+repr(steps)
analysis_by_bin(writename, spikelist2,Bin,steps,delay,rec.dt, limit,stim_start,stim_end)
f.close()
flg = plt.figure()
plt.subplot(311)
plt.plot(if_2t,if_2)
plt.ylim(0,300)
plt.subplot(312)
plt.plot(if_3t,if_3)
plt.ylim(0,300)
plt.subplot(313)
plt.plot(if_4t,if_4)
plt.ylim(0,300)
plt.savefig("./graph/"+writename+"instantaneusFrequency.png")
plt.close()
#flg2__ = plt.figure()
fig2__ =plt.figure(figsize=(12,8),dpi=400)
plt.plot([stim_start,stim_end],[295,295],linewidth=8,color='k')
plt.plot(if_2t,if_2,color='0.2')
plt.ylim(0,300)
plt.xlim(0,15)
plt.xlabel('Time[s]',fontsize=20)
plt.ylabel('Frequency[Hz]',fontsize=20)
plt.title('Instantaneus Frequency',fontsize=20)
plt.savefig("./graph/"+writename+"if.png")
plt.close()
#plt.show()
#end of data_analysis
w_step_time =[0 for _ in range(RANGE)]
for i in range(RANGE):
w_step_time[i] = (i-10)*0.1
i=0
for i in range(len(filenames)):
data_analysis(filenames[i])
for j in range(RANGE):
w_step_freq[j] = w_step_freq[j]/6.0
aver_f = open('./analyzed_data/average.dat','w')
for i in range(RANGE):
aver_f.writelines(repr(w_step_time[i])+'\t'+repr(w_step_freq[i])+'\n')
fig3 = plt.figure(figsize=(10,8),dpi=400)
fig3.patch.set_alpha(0.0)
plt.rcParams['font.size']=20
plt.bar(np.array(w_step_time)-6.0, w_step_freq,width = 0.1,color="#3b3b3b")
plt.ylim(0,120)
plt.xlim(-6,8)
plt.xlabel('Time[s]',fontsize=30)
plt.ylabel('Frequency[Hz]',fontsize=30)
plt.savefig("./graph/average.png")
#plt.close()
#### Graph for Presentation
#plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 20
plt.rcParams['axes.linewidth'] = 2.0
#plt.rcParams['xtics.major.size'] = 10
#plt.rcParams['xtics.major.width'] = 1.5
fig = plt.figure(figsize=(5,4),dpi=250)
fig.subplots_adjust(bottom=0.2,left =0.20)
ax = fig.add_subplot(111)
fig.patch.set_alpha(0.0)
plt.bar(np.array(w_step_time)-6.0, w_step_freq,width = 0.1,color="#3b3b3b")
plt.ylim(0,120)
plt.xlim(-6,8)
plt.xlabel('Time[s]')
plt.ylabel('Frequency[Hz]')
plt.savefig("./graph/average_fp.png")
plt.close()
| [
"[email protected]"
] | |
9eb2a02715208ed6e0124decfad21ddb2fc33527 | 401aa55f4db07846bc567af007df3a9e9c197353 | /main.py | 3beca79208cea2db4f1d542b4c715b7322c9e94e | [] | no_license | hachibeeDI/learn-deeplearning | 9adf4872a16cacd098ad80ed157740ed4c4a54a7 | 683ad03c05efed5e06aa968056cfb703264ecbc7 | refs/heads/master | 2021-04-09T10:20:27.167866 | 2018-03-31T12:58:10 | 2018-03-31T12:58:10 | 125,313,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | import numpy as np
from matplotlib import pylab as plt
from src import exp
class Chap_3_3:
def confirm_sigmoid():
x = np.array([-1.0, 1.0, 2.0])
y = exp.sigmoid(x)
plt.plot(x,y)
plt.ylim(-0.1, 1.1)
plt.show()
    @staticmethod
    def confirm_exp_3_9():
"""
p60-p61
        Weighted sums for the whole first layer (three neurons at once).
        W = weights
        B = biases
        Z = post-activation outputs of a layer
        a1 = b1 + w11*x1 + w21*x2, computed layer-wide as A1 = X.W1 + B1
"""
X = np.array([1.0, 0.5])
        W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
B1 = np.array([0.1, 0.2, 0.3])
A1 = np.dot(X, W1) + B1
return A1
    @staticmethod
    def confirm_activate_function_applied():
"""
p62
h(a1) => z1
"""
        A1 = Chap_3_3.confirm_exp_3_9()
Z1 = exp.sigmoid(A1)
print(A1)
print(Z1)
def init_network():
return {
'W1': np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]),
'b1': np.array([0.1, 0.2, 0.3]),
'W2': np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]),
'b2': np.array([0.1, 0.2]),
'W3': np.array([[0.1, 0.3], [0.2, 0.4]]),
'b3': np.array([0.1, 0.2]),
}
def forward(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = exp.sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = exp.sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = exp.identity(a3)
return y
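# Shape check for the network above: x is (2,), W1 is (2,3) so a1/z1 are (3,),
# W2 is (3,2) so a2/z2 are (2,), and W3 is (2,2) so the output y is (2,).
# identity() on the output layer is the usual choice for regression; a
# classifier would apply softmax here instead.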
if __name__ == '__main__':
network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)
assert y[0] == 0.3168270764110298, y[0]
assert y[1] == 0.6962790898619668, y[1]
| [
"[email protected]"
] | |
9cde44866d0e5b7b3b6c3c540e755ecc32f55691 | 002c182e171ffa52d28bef8ebc9622cd252201c0 | /counter.py | 820815cf39ba3eb61da5522409189b7ad2b55991 | [] | no_license | pinkcoderunicorn/Mean-Median-and-Mode | 865b012f3adfcea765e530d4cc618f04c71b13b0 | 8e9541c5be80a5f3e09c56eeef4bc85ae584adda | refs/heads/main | 2023-07-09T14:42:02.017397 | 2021-08-22T05:20:01 | 2021-08-22T05:20:01 | 398,720,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from collections import Counter
new_data = "whitehatjr"
data = Counter(new_data)
print(data)
new_list = data.items()
print(new_list)
value = data.values()
print(value) | [
"[email protected]"
] | |
6cdb12e87e1c58c080e8da087c4144b04d09ebfb | d469ad948b8d0f199fc1bbdb625678d68c87b919 | /exp/nb_copernicusActivations.py | 999afdc8d7be242ad6bf479f1030fcb00bd6d643 | [
"Apache-2.0"
] | permissive | dmbernaal/copernicus | 4723b07b074e58dea294b5192d43ba111ac4f2f0 | 4e4b9692418bf6575f69a5b6de7228765266d14e | refs/heads/master | 2020-12-12T00:29:33.356928 | 2020-01-15T04:19:56 | 2020-01-15T04:19:56 | 233,995,659 | 0 | 0 | null | 2020-01-15T04:19:57 | 2020-01-15T04:06:59 | Jupyter Notebook | UTF-8 | Python | false | false | 1,970 | py |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/copernicusActivations_001.ipynb
import torch
import math
import torch.nn.functional as F
import torch.nn as nn
# mish activation
class Mish(nn.Module):
def __init__(self): super().__init__()
def forward(self, x): return x * ( torch.tanh(F.softplus(x)))
class Selu(nn.Module):
def __init__(self):
super().__init__()
self.alpha = torch.tensor(1.6732632423543772848170429916717)
self.scale = torch.tensor(1.0507009873554804934193349852946)
def forward(self, x):
return self.scale * torch.where(x>=0.0, x, self.alpha * torch.exp(x) - self.alpha)
def get_activation_(act):
if act is None or act == 'relu': act_fn = nn.ReLU(inplace=True) # relu as default
elif act == 'mish': act_fn = Mish()
elif act == 'selu': act_fn = Selu()
elif act == 'elu': act_fn = nn.ELU()
elif act == 'hardshrink': act_fn = nn.Hardshrink()
elif act == 'hardtanh': act_fn = nn.Hardtanh()
elif act == 'leakyrelu': act_fn = nn.LeakyReLU()
elif act == 'logsigmoid': act_fn = nn.LogSigmoid()
elif act == 'prelu': act_fn = nn.PReLU()
elif act == 'relu6': act_fn = nn.ReLU6()
elif act == 'rrelu': act_fn = nn.RReLU()
elif act == 'celu': act_fn = nn.CELU()
elif act == 'sigmoid': act_fn = nn.Sigmoid()
elif act == 'softplus': act_fn = nn.Softplus()
elif act == 'softshrink': act_fn = nn.Softshrink()
elif act == 'softsign': act_fn = nn.Softsign()
elif act == 'tanhshrink': act_fn = nn.Tanhshrink()
else:
raise ValueError('Act is not properly defined: check activations list')
return act_fn
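# Minimal usage sketch (torch is imported above): build an activation by name
# and apply it to a small tensor.
if __name__ == '__main__':
    x = torch.randn(4)
    for name in ('relu', 'mish', 'selu'):
        print(name, get_activation_(name)(x))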
activations = ['mish', 'selu', 'elu', 'relu', 'hardshrink', 'hardtanh', 'leakyrelu', 'logsigmoid', 'prelu', 'rrelu', 'relu6', 'celu', 'sigmoid', 'softplus', 'softshrink', 'softsign', 'tanhshrink'] | [
"[email protected]"
] | |
d7e63dc0aa5732d688a925eb77b33fd1951f4883 | 51981912bbe59bc2179aceae00d0a5d99ef2f9e7 | /node_modules/mongoose/node_modules/mongodb/node_modules/kerberos/build/config.gypi | 7f6004f3e577c1b395938a61022aecdd8cb62b4b | [
"MIT",
"Apache-2.0"
] | permissive | bmdFalmouth/GTVServer | 65b61da4e547f08fb435ada0a1f4a6fd0f50a32d | 9bf96039b8c8a4ef202861d0dac127895c498553 | refs/heads/master | 2022-06-28T08:54:57.867608 | 2013-08-03T21:18:09 | 2013-08-03T21:18:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/brian/.node-gyp/0.10.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/Users/brian/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/brian/.npm-init.js",
"userconfig": "/Users/brian/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.10.7",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/brian/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.7 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/q5/fcs3189n4vxb3l495j86w6300000gp/T/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
7eb8c4d235c5035ab3546fcd9eb03f4ca97c8e1a | b59b658f5e73883dd7bb096bb27ecb90df9ff499 | /breakthrough/migrations/0005_databank_q_class.py | 4250aefdf991454a9ff178de99d2ed5215fa593f | [] | no_license | Liewithen/Ape-Evolution | f7864b9295c2ab34425eb07b1a02ae25b8f95c9a | f2518999a157266d3dfb28baae25ea5167dfb6c1 | refs/heads/master | 2021-01-10T22:18:15.796754 | 2016-10-16T02:12:34 | 2016-10-16T02:12:34 | 70,323,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-27 09:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breakthrough', '0004_auto_20160927_0840'),
]
operations = [
migrations.AddField(
model_name='databank',
name='q_class',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
2f460c436140054e616edf206a5e6d9025614efd | cc394dac9219889d6dd59583d1a70e4ca58a47a4 | /P7_E_2.py | bfdbb0774480ea47e7a1afd18bd727e9e1f46f94 | [
"Giftware"
] | permissive | animformed/problem-sets-mit-ocw-6 | de8462fb32d34f9d57dcef6e00ed53be1749e265 | a51ff7e59a4e6ffa924db59a482a43e413644ce0 | refs/heads/master | 2021-01-22T07:32:57.873780 | 2014-10-22T01:02:33 | 2014-10-22T01:02:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | class MyError(Exception): pass
def oops():
raise MyError('spam')
#raise IndexError
def run():
try:
oops()
except MyError as X:
print 'oops caught!', X.__class__, X.args[0]
except IndexError:
import sys
print 'IndexError caught', sys.exc_info()[:2]
else:
print 'no exception'
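        # the else clause runs only when oops() raises nothing; with MyError
        # raised above, the first except branch fires instead and else is skipped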
if __name__ == '__main__':
run() | [
"[email protected]"
] | |
98d3a767fabe37017451aec5edb5bf275ae80812 | d1e8ac79c69b2c909ec739d841ae3fbb44539ad2 | /assignment5_2.py | ab5d38db4202c91fb49637b2620f55a359508e3d | [] | no_license | Kyaw-Wonna-MM/assignment | 6791f9a34ade12c70a3fbf03a8e9777a9d7b6115 | 8c7422b682021fb925c88a82b7cfbce388bd8ade | refs/heads/main | 2023-02-03T13:19:13.321508 | 2020-12-16T18:39:11 | 2020-12-16T18:39:11 | 322,058,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | num = 0
tot = 0.0
while True:
sval = input("enter a number: ")
if sval == "done":
break
try:
fval = float(sval)
except:
        print("Invalid input")
continue
num = num + 1 #counter
tot = tot + fval #sum all input
print(tot,num,tot/num) | [
"[email protected]"
] | |
b9187104c05e26111ccbd49ca1e9b63f852fcce0 | 05b7a0b91f5edcf33e945d4cdc78c7dfe9d669da | /pybotfinal.py | eb14e74f941462ce0da48a51691ba86c622bcef9 | [] | no_license | sindhurao385/IRC-bot- | b1f2d8ed513694f4a5b053218a60eda59433b6cb | b21f8ba60294b89789ee130033d0165d840b68b1 | refs/heads/main | 2023-07-18T22:05:06.268907 | 2021-08-31T15:35:02 | 2021-08-31T15:35:02 | 401,755,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,627 | py | #!/usr/bin/python3
import socket
import random
import sys
sys.path.insert(0,"\HOME\Desktop")
import dictionary
from dictionary import motivational,literature,philosophical,jokes
socket.getaddrinfo("182.73.209.206",6665) # result unused; the actual connection below goes to chat.freenode.net:6667
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = "chat.freenode.net" # Server
channel = "##testabotforme" # Channel
botnick = "pybot" # bots nick
exitcode = "bye " + botnick
ircsock.connect((server, 6667)) # Here we connect to the server using the port 6667
ircsock.send(bytes("USER "+ botnick +" "+ botnick +" "+ botnick + " " + botnick + "\n", "UTF-8")) #We are basically filling out a form with this line and saying to set all the fields to the bot nickname.
ircsock.send(bytes("NICK "+ botnick +"\n", "UTF-8")) # assign the nick to the bot
def joinchan(chan): # join channel(s).
ircsock.send(bytes("JOIN "+ chan +"\n", "UTF-8"))
ircmsg = ""
while ircmsg.find("End of /NAMES list.") == -1:
ircmsg = ircsock.recv(2048).decode("UTF-8")
ircmsg = ircmsg.strip('\n\r')
print(ircmsg)
def ping(): # respond to server Pings.
ircsock.send(bytes("PONG :pingis\n", "UTF-8"))
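# IRC servers periodically send PING and drop clients that fail to reply,
# so the main loop below answers every PING with this PONG.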
def sendmsg(msg, target=channel): # sends messages to the target.
ircsock.send(bytes("PRIVMSG "+ target +" :"+ msg +"\n", "UTF-8"))
def main():
joinchan(channel)
while 1:
ircmsg = ircsock.recv(2048).decode("UTF-8")
ircmsg = ircmsg.strip('\n\r')
print(ircmsg)
        if ircmsg.find("PRIVMSG") != -1:
            name = ircmsg.split('!',1)[0][1:]
            message = ircmsg.split('PRIVMSG',1)[1].split(':',1)[1]
            if "hey" in message.rstrip():
                sendmsg("Hello! My name is pybot")
            if "what can you do" in message.rstrip():
                sendmsg("I'm an IRC bot that can bombard your life with quotes! If you are not interested in quotes, I have some funny jokes as well.")
            if "jokes" in message.rstrip():
                sendmsg(random.choice(jokes))
            if "quotes" in message.rstrip():
                sendmsg("Right now I've got three types of quotes: 1.Motivational 2.Philosophical 3.Literary. What kind of quote would you like to see?")
            if "motivational" in message.rstrip():
                sendmsg("With whose quote can I inspire you? Bill Gates, Confusius, Paulo Coelho, Nelson Mandela or Abdul Kalam?")
            if "literary" in message.rstrip():
                sendmsg("Would you like to see quotes by Margaret Atwood, Ezra Pound, Nelson Mandela, John Updike or Italo Calvino?")
            if "philosophical" in message.rstrip():
                sendmsg("Would you like to see quotes by Nietzche, Freud, Karl Marx, Kant or Kierkegaard?")
            if "Bill Gates" in message.rstrip():
                sendmsg(random.choice(motivational["Bill Gates"]))
            if "Confusius" in message.rstrip():
                sendmsg(random.choice(motivational["Confusius"]))
            if "Paulo Coelho" in message.rstrip():
                sendmsg(random.choice(motivational["Paulo Coelho"]))
            if "Nelson Mandela" in message.rstrip():
                sendmsg(random.choice(motivational["Nelson Mandela"]))
            if "Abdul Kalam" in message.rstrip():
                sendmsg(random.choice(motivational["Abdul Kalam"]))
            if "Margaret Atwood" in message.rstrip():
                sendmsg(random.choice(literature["Margaret Atwood"]))
            if "Ezra Pound" in message.rstrip():
                sendmsg(random.choice(literature["Ezra Pound"]))
            if "John Updike" in message.rstrip():
                sendmsg(random.choice(literature["John Updike"]))
            if "Italo Calvino" in message.rstrip():
                sendmsg(random.choice(literature["Italo Calvino"]))
            if "Forster" in message.rstrip():
                sendmsg(random.choice(literature["Forster"]))
            if "Nietzche" in message.rstrip():
                sendmsg(random.choice(philosophical["Nietzche"]))
            if "Freud" in message.rstrip():
                sendmsg(random.choice(philosophical["Freud"]))
            if "Karl Marx" in message.rstrip():
                sendmsg(random.choice(philosophical["Marx"]))
            if "Kant" in message.rstrip():
                sendmsg(random.choice(philosophical["Kant"]))
            if "Kierkegaard" in message.rstrip():
                sendmsg(random.choice(philosophical["Kierkegaard"]))
            if "thanks" in message.rstrip():
                sendmsg("You're welcome. I'm glad I could help you :) ")
            if message.rstrip() == exitcode:
                sendmsg("oh...okay. :'( bye ")
                ircsock.send(bytes("QUIT \n", "UTF-8"))
                return
        else:
            # answer server keepalive PINGs so the bot is not disconnected
            if ircmsg.find("PING :") != -1:
                ping()
main()
| [
"[email protected]"
] | |
47a4d611b83645f007dccbfdc22cb318abe48b66 | 78d7436e51d2b00e56b6d7234d5e7861b5431256 | /3. Testing/CLAHE/clahe_test.py | 80fe7258767363c79db2673c974ea270dd87d767 | [] | no_license | sakthi-s/face-replacement-in-videos | 7005943168a46febcc7a6ecfcf2e719517f9c2af | 29a21b38197843bff338ec5d0b47b74cfcbd2cc3 | refs/heads/master | 2021-09-09T12:02:28.378885 | 2018-03-15T20:49:00 | 2018-03-15T20:49:00 | 78,509,906 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import numpy as np
import cv2
img = cv2.imread('clahe.png',0)
# create a CLAHE object (Arguments are optional).
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(img)
cv2.imwrite('clahe_2.jpg',cl1)
cv2.imshow('img',cl1)
cv2.waitKey(0)
cv2.destroyAllWindows
| [
"[email protected]"
] | |
60aef547dda585aef44dcb5bcecd830a2b57519a | 33fad70445b90dabc830f5c6a985a5c1522b7fe2 | /shop/migrations/0011_auto_20191107_1833.py | 77c65431b76bdb39406b877da81e299474bd2a8f | [] | no_license | Wald-K/sandbox | 1b6521ee37179346ca13a348bfd4a1ba90da19e4 | 586a2570cd66b5008b308f3f2877970dfb7729a9 | refs/heads/master | 2022-12-11T18:18:17.997607 | 2022-12-07T18:15:43 | 2022-12-07T18:15:43 | 220,682,137 | 0 | 0 | null | 2022-11-22T04:38:37 | 2019-11-09T18:00:35 | Python | UTF-8 | Python | false | false | 516 | py | # Generated by Django 2.2.6 on 2019-11-07 18:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0010_auto_20191105_2209'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='products',
),
migrations.AddField(
model_name='product',
name='categories',
field=models.ManyToManyField(blank=True, to='shop.Category'),
),
]
| [
"[email protected]"
] | |
15e410315b08eaceeeb76925ab14d2131bde401e | ac3339c95afe9066d66ff009fb1b7c3fde927ee6 | /Python/06_Django/02_Django/01_Dojo_Signal/dojo_signal/wsgi.py | d8263ab40949c7611d6eebfc0cb2c8f0b0f6bd6d | [] | no_license | aakasht05/DojoAssignments | 5d5013700528084fd17f93ebaea55d20aeea2d9d | 227f6deb62e75e07243ec9058d8557973b03840e | refs/heads/master | 2021-05-03T15:46:39.527638 | 2018-04-13T05:59:38 | 2018-04-13T05:59:38 | 120,479,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for dojo_signal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dojo_signal.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
4094d98bff825c29fbd92b7598849c23d0da9c4e | 6c14b4d76dc8efa925ef55273b1f7c4b8ba7d2d1 | /couchbase_smoke_test.py | d6e8851ab7335eaaa588e6cac6599439e3a5dc47 | [] | no_license | sjdillon/couchbase_smoke_test1 | 23cb6c4dad7d3a9b81098ba54890e2c8ef5ae0d5 | 73fc2db0d8b2c415d8237d14bc146809796a28bd | refs/heads/master | 2021-01-17T07:29:18.060220 | 2017-03-02T20:57:06 | 2017-03-02T20:57:06 | 83,723,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | #!python
#==========================================================================#
# sjdillon
# 1. quick smoke test to confirm nodes, buckets, credentials are correct
# sets, gets, deletes keys against nodes in cluster, output exec time
# stores cluster nodes in pickled json file
#==========================================================================#
from couchbase import Couchbase
import pickledb
import timeit
def get_config(key, dbfile):
pdb=pickledb.load(dbfile, False)
config=pdb.dgetall(key)
return config
def key_test(bucket,pw,env):
server=env
start = timeit.default_timer()
print 'bucket: %s' % (bucket)
print 'pw: %s' % pw
print 'nodes: %s' % server
cb=Couchbase.connect(host=server,bucket=bucket,password=pw, timeout=10)
key='miskatonic'
val={"first_name":"Keziah", "last_name":"Mason", "town":"Arkham"}
print '1. setting key...'
cb.set(key,val)
print '\t2. getting key...'
result=cb.get(key)
print '\t' + str(result.value)
print '\t3. deleting key...'
#cb.delete(key)
stop = timeit.default_timer()
print '\texecution time:%fs' % (stop-start)
print '\n'
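# key_test round-trips one document per call: set, get, and (currently
# commented out) delete, timing the whole exchange to spot slow or
# misconfigured nodes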
def smoke(bucket,pw,env):
print 'environment: %s' % env
servers=get_config(env,'couchbase.db')
key_test(bucket,pw,servers)
def smoke_each_node(bucket,pw,env):
servers=get_config(env,'couchbase.db')
print servers
for server in servers:
key_test(bucket,pw,server)
# run test
env='my_cluster'
smoke_each_node('CoinOpCache','Aki9kak9ujj',env)
smoke_each_node('YarsReveng','Loev213ddaa',env)
smoke_each_node('BatConfiguration','woolw98rcccaw1',env)
| [
"[email protected]"
] | |
f4de029c363753117ec6770d310257147ba9c193 | 75f0e04c6330950a9bd225fd8b62fdc9fb0021b8 | /11.containerWithMostWater.py | da22be17ee8650062f7a31f774ab9d4f19f52b10 | [] | no_license | Minzhe/LeetCode | f07bc1edce77cee778d7dc3c4a379921747637a5 | e5cb0212cb83daac829456c14aec557e26eea68c | refs/heads/master | 2020-05-04T11:25:07.084483 | 2020-03-09T02:10:17 | 2020-03-09T02:10:17 | 179,107,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | class Solution:
def maxArea(self, height) -> int:
i, j = 0, len(height)-1
area = (j-i)*min(height[i], height[j])
while i < j:
area = max(area, (j-i)*min(height[i], height[j]))
if height[i] < height[j]:
i += 1
else:
j -= 1
return area
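        # Two-pointer argument: the area is capped by the shorter line, so
        # moving the taller pointer inward can never help; advancing the
        # shorter pointer is the only move that might find a larger area,
        # which justifies the single O(n) pass.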
| [
"[email protected]"
] | |
9f831a525ca6b399a262076018968a9ab96e7468 | 93d2dc4c33e3fdb06a42166c0a85108ad8285971 | /JOB_SEARCH.py | 6e3272ada19b83b2d9aec20a5a9a6bf451b63c04 | [] | no_license | Vikendra447/New-Repo | 492693e36fbc93b4e4a7499412514624b9e470b7 | b29c97fd7bdfafc33def0f3828203663c6b23c45 | refs/heads/master | 2020-07-29T15:47:40.053946 | 2020-04-04T10:29:36 | 2020-04-04T10:29:36 | 209,867,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,926 | py | from tkinter import *
root=Tk()
root.title('PYTHON PROJECT')
var2 = StringVar()
var2.set(" ")
var1 = StringVar()
var1.set(" ")
Label(root,text=' J O B S E A R C H', font=('Helvetica',25),fg='black',relief=SUNKEN,bg='green').grid()
Label(root,text='Student_Name', font=('Helvetica',15),fg='red').grid(row=1,column=0,sticky=W)
Label(root,text='Vikendra Singh',width=12, font=('Helvetica',15),fg='blue').grid(row=1,column=0,sticky=E)
Label(root,text='Enroll No.', width=12,font=('Helvetica',15),fg='red').grid(row=2,column=0,sticky=W)
Label(root,text='151447 ',width=12, font=('Helvetica',15),fg='blue').grid(row=2,column=0,sticky=E)
Label(root,text='Batch ', width=12,font=('Helvetica',15),fg='red').grid(row=3,column=0,sticky=W)
Label(root,text='B-7 ', width=12,font=('Helvetica',15),fg='blue').grid(row=3,column=0,sticky=E)
Label(root,text='Branch ', width=12,font=('Helvetica',15),fg='red').grid(row=4,column=0,sticky=W)
Label(root,text='SCE ',width=12, font=('Helvetica',15),fg='blue',).grid(row=4,column=0,sticky=E)
Label(root,text='Submitted to ', width=12,font=('Helvetica',15),fg='red').grid(row=5,column=0,sticky=W)
Label(root,text='DR.Mahesh kumar',width=15, font=('Helvetica',15),fg='blue',).grid(row=5,column=0,sticky=E)
Button(root,text='Find Jobs | Find Resumes | Employers / Post Job | Post Your Resume | Sign in / Log in',bg='black',fg='red',width=150).grid(row=10,column=0)
def start():
Label(root,text='indeed:',font=('Helvetica',50),fg='blue').grid(row=19,column=0,sticky=W)
Label(root,text='what',font=('Helvetica',30),fg='red').grid(row=20,column=0,sticky=W)
Label(root,text='where',font=('Helvetica',30),fg='red').grid(row=20,column=0)
op=OptionMenu(root,var2,'Delhi','Banglore','Chennai','Bhopal','Mumbai','Wizag','Ahamdabad','Indore','Madhyapredesh','Punjab','Surat','Uttarpradesh','Chhatisgarh','Gujrat')
op.grid(row=21,column=0)
    op=OptionMenu(root, var1,"Computer Science","Civil","Electronic","Electrical",'Mechanical','Teacher','Banking','Medical','Army','Online marketing','Civil service')
op.grid(row=21,column=0,sticky=W)
def find():
import tkinter
root=Tk()
        if var1.get()=='Computer Science':
            Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Computer Science]\n\nCustomer Service Executive (CSE):\nOperations and Customer Quality Engineer II - (E2) :\nSr. Software Development Engineer in Test (SDET): \nTelephonic Clinical Counselor:\nPrincipal Business Analyst, India IT:\nSDSO Knowledge Training & Compliance Expert:\nManual Testing(Gherkin Language):\nSMTS Software Engineer:\nSoftware Engineer 2:\nTechnical Helpdesk Management:\nEngineering Manager - DWH/Big Data:\nClient Service Executive').grid()
elif var1.get()=="Civil":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Civil]\n\nAutocadd Draftsperson:\nPROJECT MANAGER - CIVIL:\nSite Engineer (Civil):\nSite Engineers:\nCivil Engineer - QS:\nAutoCAD Draftsman:\nMarketing Executives, Civil Engineer, Store Incharge & Recep:\nTrainee Testing Engineer:\nUrgently Looking for a project coordinator ( civil ):\nSite Supervisor (Civil) - Building Construction having 5 Yrs:\nCivil Engineer walk in Interview 15 nov 2016 to 26 nov 2016:').grid()
elif var1.get()=="Electronic":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Electronic]\n\nField Application Engineer - 5093:\nSenior Application Engineer - 4885:\nSenior Statistical Programmer :\nElectronics Engineering walk 15 nov 2016 to 26 nov 2016:\nJunior-Engg CAM-Diploma ,Electrical / Electronics / Mecatron:\nSOLDERING & PANEL ASSEMBLER:\nElectronic Engineering Jr:\nHardware Designer / Technician:\nR&D Engineer - Electronics: \nEngineer (Electrical/Electronics):\nSystem Engineer Power Breaker:\nPurchase Exeutive (Electrical and Electronics)').grid()
elif var1.get()== "Electrical":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Electrical]\n\nElectrical Maintenance Engineer:\nElectrical Engineering walk in 15nov 2016 to 26 nov 2016:\nElectrical Engineer:\nEngineer (Electrical):\nElectrical Engineer (Maintenance):\nElectrical Design Engineer:\nElectrical Technician:\nJunior-Engg CAM-Diploma ,Electrical / Electronics / Mecatron:\nSenior Test Engineer :\nRisk Engineer ').grid()
elif var1.get()== "Machenical":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Machenical]\n\nGeneral Manager / Dy. General Manager - Operation:\nMechanical Engineer Fresher,Electrical:\nMechanical Engineer:\nMechanical and Automobile Engineering:\nTrainee Engineer:\nMechanical plant inchage:\nmechanical engineer freshers:\nTrainee Engineer(ITI/ Dip Mechanical) Pithampur:\nMechanical / Thermo Mechanical Engineer').grid()
elif var1.get()== "Teacher":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Teacher]\n\nPGT,TGT,PRT All subject teachers for Next Academic Session:\nWanted primary teachers for cbse school:\nPrimary & Pre Primary Teachers:\nJOB FOR NGO:\nSubject Expert Teacher (Curriculum Based-State/CBSE/ICSE):\nCenter Head - EuroKids Preschool / Creche:\nUrgent Requirement of Computer Teacher:\nPrincipal required for an upcoming CBSE School at Seoni:\nPre-primary Teacher').grid()
elif var1.get()== "Banking":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Banking]\n\nFemale Candidate Back Office Work and Telecalling :\nassistant manager, operations - banking, credit and investme.:\nbanking operations:\nFRESHERS IN BANKING OPERATION:\nInterviews for Banking Sector:\nJobs In Banking Sector:\nRelationship Manager- Commercial Business Banking:\nVacancy in Bank\nback office executive for Banking Sector:\nassistant manager, corporate banking services').grid()
elif var1.get()== "Medical":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Medical]\n\nClinical Data Associate :\nMedical Officer and Hindi Officer / AR:\nCOMPANY SUPERVISOR:\nTransaction Processor II, Service Delivery\nMedical Transcriptionist for CT-MRI:\nMedical Officer MBBS:\nMedical Officer (Female doctor):\nJunior Scientific Officer (Explosives):\nVacancy for Medical Representative (MR / AM / ZM / RM):\nCRM/Customer Relationship Officer/Relationship Manager').grid()
elif var1.get()== "Army":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Army]\n\nOperations Analyst:\nBTL & Trade Marketing Executive :\nSecurity Officer:\nCounselor/TGT:\nRegional Head West:\nMale Warden:\nHead - DRA ( Regulatory Affairs ):\nLabour Officer:\nWARDENS\nBusiness Development Executive:\n[Government] Security Watchman cum Fireman (Second Attempt)').grid()
elif var1.get()== "Online marketing":
Label(root,bg='black',fg='white',text='[YOU HAVE AVAILABLE JOB FOR Online marketing]\n\nMarketing and Business Development Executive:\nSr. Analyst - Email Operations:\nSales & Marketing Manager :\nDigital Marketing Executive:\nDigital Marketing Manager:\nUrgent Requirement for Digital Marketing Manager:\nOnline Marketing Manager for E Commerce:\nUrgently required Marketing/Branding Manager for FMCG sector:\nFront End Developer - SDE II ').grid()
mainloop()
Label(root,text='job title, keywords or company name',fg='blue').grid(row=22,column=0,sticky=W)
Button(root,text='find jobs',width=15,height=1,command=find).grid(row=21,column=0,sticky=E)
Label(root,text='city or postcode',fg='blue').grid(row=22,column=0)
Label(root,text='Advanced job search',fg='blue').grid(row=22,column=0,sticky=E)
Button(root,text='LET BEGIN YOUR PROJECT WORK',command=start,height=2,bg='green').grid(row=6,column=0)
root.mainloop()
| [
"[email protected]"
] | |
abb2b363698247fe9e19e20fb4089a7c72b88b3d | 0a6d0ea3a60ece86ec477e490605b063b2e8c060 | /dlYouTubeFromLocalHtml.py | 25c916d53da6626d1b89c0beac233c646349dff5 | [] | no_license | alclass/bin | bdbe063d629c05e17f64dc27f71211e5881e1f3b | b4c5642c8d5843846d529630f8d93a7103676539 | refs/heads/master | 2023-07-24T11:36:50.009838 | 2023-07-09T20:26:42 | 2023-07-09T20:26:42 | 17,493,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Downloads the YouTube videos referenced in a locally saved HTML page
using youtube-dl, skipping video ids already present in the local folder.
'''
import os, re, sys
urlBase = 'www.youtube.com/watch?v=%s'
commBase = 'youtube-dl "%s"'
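# Assumes the youtube-dl executable is installed and available on the PATH.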
def getLocalHtmlFileFromSysArg():
    if len(sys.argv) < 2:
        print 'Usage: dlYouTubeFromLocalHtml.py <local-html-file>'
        sys.exit(1)
    htmlFilename = sys.argv[1]
    if os.path.isfile(htmlFilename):
        return htmlFilename
    print 'file', htmlFilename, 'is not in the local folder.'
    sys.exit(1)
filesOnCurrentDir = os.listdir('.')
def isItOnCurrentDir(uTubeIds):
for eachFile in filesOnCurrentDir:
if eachFile.find(uTubeIds) > -1:
return True
return False
def youtubeDownload():
htmlFilename = getLocalHtmlFileFromSysArg()
    reStr = r'/watch[?]v[=](\w+)&'
reComp = re.compile(reStr)
iterObj = reComp.findall(open(htmlFilename).read())
total = len(iterObj); seq = 0; uTubeIdsList = []
for uTubeIds in iterObj:
if uTubeIds in uTubeIdsList:
total -= 1; print 'Video repeat, moving on to next one', seq+1, 'of', total
continue
if isItOnCurrentDir(uTubeIds):
total -= 1; print 'There is a video with the same id %s, moving on to next one' %uTubeIds, seq+1, 'of', total
continue
uTubeIdsList.append(uTubeIds)
seq += 1
print seq, 'of', total, uTubeIds
url = urlBase %uTubeIds
comm = commBase %url
os.system(comm)
if __name__ == '__main__':
# copyToUsb()
youtubeDownload()
| [
"[email protected]"
] | |
b62f2225a75e511adb64b907b5738e5bc64be07d | dcebb803285499f880e82c2aede20d1c109bb781 | /openai/baselines/baselines/deepq/experiments/test_cloud.py | 4ebfbd38ef4fc06e26dab0f9a300b80e92abe15a | [
"MIT",
"BSD-3-Clause"
] | permissive | johndpope/ETHZDeepReinforcementLearning | 1216f5e259f606e3c9228e7850e718cdccd6a280 | e1ae22159753724290f20068214bb3d94fcb7be4 | refs/heads/master | 2020-03-22T02:27:54.928283 | 2017-11-22T23:22:53 | 2017-11-22T23:22:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | import gym
from baselines import deepq
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame
from cloud_environment import CloudEnvironment
import numpy as np
import collections
import os
import csv
import pandas as pd
#Logging
def logger_callback(locals,globals):
done = locals['done']
num_episodes = locals['num_episodes']
log_action_l = locals['log_action_l'] # actions chosen in current episode step
log_action_l.append(locals['action'])
if done:
action_counter = collections.Counter(log_action_l).items()
reward_sum = np.sum(locals['episode_rewards'])
reward_mean = np.mean(locals['episode_rewards'])
c_reward_sum = np.sum(locals['cumulative_episode_rewards'])
c_reward_mean = np.mean(locals['cumulative_episode_rewards'])
path = locals['test_file_path']
print("Writing episode {} log to ".format(num_episodes), path)
with open(path, 'a') as f:
env = locals['env']
actions_np = np.zeros(env.action_space.n)
for k, v in action_counter:
actions_np[k] = v
action_count_header = ['action_count{}'.format(i) for i in range(env.action_space.n)]
#action_q_header = ['mean_action_q{}'.format(i) for i in range(len(episode_q_t.tolist()))]
headers = ['episode','reward_sum','reward_mean','c_reward_sum','c_reward_mean']
#headers = headers + action_q_header+action_count_header
headers = headers + action_count_header
action_counts = list(actions_np)
#actions_qs = [q for q in episode_q_t.tolist()]
#output_list = [num_episodes]+[steps]+[rew100]+[rew50]+[rew10]+[episode_q_t_selected]+[episode_q_t_targets]+[episode_td_errors]+[episode_errors]+ actions_qs+action_counts
output_list = [num_episodes] + [reward_sum] + [reward_mean] + [c_reward_sum] + [c_reward_mean] + action_counts
print(headers)
print(output_list)
w = csv.writer(f)
if os.stat(path).st_size == 0:
w.writerow(headers)
w.writerow(output_list)
return False
def result_callback(ci_list,episode_list,locals,globals):
nps = len(episode_list[0])-len(ci_list[0])
cis_l = [[np.nan]*nps + cil for cil in ci_list]
e_df = pd.concat(episode_list,axis=0).reset_index(drop=True)
ci_df = pd.DataFrame(np.concatenate(cis_l),columns=['ci']).reset_index(drop=True)
output_df = pd.concat([e_df,ci_df],axis=1)
output_df.dropna(inplace=True)
output_df = output_df.reset_index(drop=True)
output_df.to_csv('eval_predictions.csv')
def main():
load_cpk="/home/nox/Masterarbeit/thesis_data/baseline_rl/simple_rl/7_unbalanced_test/experiments_unbalanced/cloud_model.pkl"
channels=3
seq_length=2
img_size=84
env = CloudEnvironment(img_size=img_size,radius=[12,13],sequence_stride=1,channels=channels,sequence_length=seq_length,ramp_step=0.1,action_type=1,action_nr=3,stochastic_irradiance=True,save_images=True)
#Note: cloud speed can be changes but may also require different ramps.. default, speed of cloud per frame at least 1 pixel in y direction
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
channels=channels,
seq_length=seq_length,
img_size=img_size
)
deepq.test(load_cpk=load_cpk,
result_callback=result_callback,
env=env,
q_func=model,
log_callback=logger_callback,
episode_n=1
)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
170937f98cbb80d0f021d4d3abe42bffac2d8cd2 | 25c82a44aa025944f441cce1d7001dbec326561b | /DataProcessingPipeline/prefit_raw_data_baselines.py | 0feab2ae1684ee6464fb9e538fab0209afcce280 | [] | no_license | thogge/RAMPS | 4eabfe022c873ebfad828f9336dc41d464eb7ebf | 8718191fd1f1f8770804847b19fa017b347da213 | refs/heads/master | 2021-12-28T03:41:15.744828 | 2021-11-11T21:51:42 | 2021-11-11T21:51:42 | 71,209,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,467 | py | """
prefit_raw_data_baselines.py
Remove zeroth order baseline from raw data. This avoids
poorly combined spectra in overlap region of combined
data cubes if the baselines are unstable in the
individual observations. Can optionally run this
process in parallel.
Example:
python prefit_raw_data_baselines.py
-i L30_Tile01-04_23694_MHz_line.fits
-o L30_Tile01-04_23694_MHz_line_prefit.fits -n 16
-i : Input -- Input file (from the Green Bank pipeline)
-o : Output -- Output file
-n : Numcores -- Number of cores available for parallized computing
-h : Help -- Display this help
"""
import sys,os,getopt
try:
import astropy.io.fits as pyfits
except:
import pyfits
import scipy.ndimage as im
import numpy as np
import numpy.ma as ma
import scipy.signal as si
import matplotlib.pyplot as plt
import multiprocessing as mp
import my_pad
import pdb
def main():
#Defaults
numcores = 1
try:
opts,args = getopt.getopt(sys.argv[1:],"i:o:n:h")
except getopt.GetoptError:
print("Invalid arguments")
print(__doc__)
sys.exit(2)
for o,a in opts:
if o == "-i":
input_file = a
elif o == "-o":
output_file = a
elif o == "-n":
numcores = int(a)
elif o == "-h":
print(__doc__)
sys.exit(1)
else:
assert False, "unhandled option"
print(__doc__)
sys.exit(2)
#Read in data into array, remove single-dimensional entries
d,h = pyfits.getdata(input_file,header=True)
d = np.squeeze(d)
"""
Check that numcores does not exceed the number
of cores available
"""
avail_cores = mp.cpu_count()
if numcores > avail_cores:
print("numcores variable exceeds the available number of cores.")
print("Setting numcores equal to "+str(avail_cores))
numcores = avail_cores
if numcores > 1:
#Split the data
s = np.array_split(d, numcores, 2)
procs = []
#Fit baselines and write to temporary files
for num in range(len(s)):
procs.append(mp.Process(target=do_chunk,
args=(num,s[num])))
for proc in procs:
proc.start()
for proc in procs:
proc.join()
else:
do_chunk(0,d)
#Recombine baselined temporary files
dout = recombine(numcores)
hout = strip_header(h[:],4)
pyfits.writeto(output_file,dout,hout,overwrite=True)
#Remove temporary files
for n in np.arange(numcores):
os.system("rm prefit_temp"+str(n)+".fits")
def do_chunk(num,data):
"""
Use apply_along_axis to apply
baseline fitting to each spectrum in
this chunk of the cube.
"""
print("Fitting chunk"+str(num)+"...")
ya = np.apply_along_axis(baseline_and_deglitch,0,data)
pyfits.writeto("prefit_temp"+str(num)+".fits",ya,overwrite=True)
def recombine(numparts,output_file="test_final.fits"):
"""
Recombine all the individual fits files into
one final image
"""
indata = []
for n in range(numparts):
d = pyfits.getdata("prefit_temp"+str(n)+".fits")
indata.append(d)
final = np.dstack(indata)
return(final)
def rolling_window(a,window):
"""
Magic code to quickly create a second dimension
with the elements in a rolling window. This
allows us to apply numpy operations over this
extra dimension MUCH faster than using the naive approach.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides+(a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def fit_baseline(masked,xx,ndeg=0):
"""
Fit the masked array with a polynomial of
the given order.
"""
ya = ma.polyfit(xx,masked,ndeg)
basepoly = np.poly1d(ya)
return(basepoly)
def baseline_and_deglitch(orig_spec,window_halfwidth=20):
"""
Function that fits a zeroth order polynomial baseline function
to a spectrum and subtracts it.
Parameters
----------
orig_spec : ndarray
The original spectrum with full resolution.
window_halfwidth : int
Half of the window width used to mask the spectrum for fitting.
Returns
-------
sub : ndarray
Baseline-subtracted spectrum.
"""
    #Mask out channels that contain signal before fitting
masked= mask_for_baseline(orig_spec,window_halfwidth=window_halfwidth)
#Get best-fit zeroth order polynomial baseline
xx = np.arange(masked.size)
poly = fit_baseline(masked,xx)
baseline = poly(xx)
    #Subtract the fitted baseline from the spectrum
sub = orig_spec - baseline
return(sub)
def mask_for_baseline(spec,sigma_cut=1.5,window_halfwidth=15):
"""
Mask the spectral channels that contain signal. Search for
signal by comparing the local standard deviation within a
window width of 2*window_halfwidth to the median local standard deviation
in all windows throughout the spectrum.
"""
ya = rolling_window(spec,window_halfwidth*2)
#Calculate local standard dev for each channel and pad the output
stds = my_pad.pad(np.nanstd(ya,-1),(window_halfwidth-1,window_halfwidth),
mode='edge')
#Figure out which bits of the spectrum have signal/glitches
med_std = np.nanmedian(stds)
std_std = np.nanstd(stds)
sigma_x_bar = med_std/np.sqrt(window_halfwidth)
sigma_s = (1./np.sqrt(2.))*sigma_x_bar
#Mask out signal for baseline
masked = ma.masked_where(np.logical_or(stds > med_std+sigma_cut*sigma_s,
np.isnan(spec)),spec)
    #Mask mask_extension channels around each masked channel
    #to capture faint line wings
    if ma.is_masked(masked):
        where_masked = np.where(masked.mask)[0]
        mask_extension = 10
        for channel in where_masked:
            masked[slice(channel-mask_extension,channel+mask_extension,1)] = ma.masked
return(masked)
def strip_header(h,n):
"""
Remove the nth dimension from a FITS header
"""
try:
h['NAXIS'] = n-1
h['WCSAXES'] = n-1
except:
h['NAXIS'] = n-1
keys = ['NAXIS','CTYPE','CRVAL','CDELT','CRPIX','CUNIT']
for k in keys:
try:
del h[k+str(n)]
except:
pass
return(h)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e5831f21a2a52efc8089670d9e6429ba6eeb6897 | bc5355f9385b891304e619a7e04d64c4a25e732c | /python/rlimit/Subprocess.py | 4459ef398eeb6ea89429cf5e66ff17af174ed3ed | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | perror/librlimit | 758c0a5db0d820b1099931679133dc73fa862f81 | 3bc3088d5f1044f0dfd18e6d25d235e511f719da | refs/heads/master | 2021-01-02T23:06:38.758866 | 2012-07-02T14:28:35 | 2012-07-02T14:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,332 | py | '''
Subprocess is a Python class to control and profile processes in a
convenient and flexible manner. It is primarily designed for black-box
testing over untrusted code in an educational context. It is also
fully based on the C library librlimit.
'''
try:
from ctypes import *
except ImportError as err:
raise ImportError (str(err) + '''
A needed module for Subprocess was not found.
You should install it and try again.
''')
try:
rlimit = cdll.LoadLibrary("librlimit.so")
except OSError as err:
raise OSError (str(err) + '''
librlimit.so cannot be found on your system.
You should install it and/or set up properly your system.
''')
class SUBPROCESS(Structure):
_fields_ = [("argc", c_int),
("argv", POINTER(c_char_p)),
("envp", POINTER(c_char_p)),
("pid", c_int),
("status", c_int),
("retval", c_int),
("stdin", c_void_p),
("stdout", c_void_p),
("stderr", c_void_p),
("stdin_buffer", c_char_p),
("stdout_buffer", c_char_p),
("stderr_buffer", c_char_p),
("real_time_usec", c_int),
("user_time_usec", c_int),
("sys_time_usec", c_int),
("memory_kbytes", c_int),
("limits", c_void_p),
("expect_stdout", c_int),
("expect_stderr", c_int),
("monitor", c_void_p),
("write_mutex", c_int)]
class Subprocess(object):
    '''Subprocess class is intended to provide basic control over
    untrusted subprocesses.

    This class provides a simple interface to run subprocesses in a
    non-blocking manner while still collecting their results. It also
    allows control over the execution time and the maximum amount of
    memory a subprocess can use, and provides basic profiling
    information (time, memory) about the subprocess.
    '''
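    # Illustrative usage (paths and arguments are hypothetical; assumes
    # librlimit.so is installed):
    #   p = Subprocess(['/bin/echo', 'hello'])
    #   p.run(timeout=5)
    #   p.wait()
    #   print p.stdout()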
def __init__(self, cmd, env=None):
'''Default constructor for a subprocess.
This constructor requires at least the command ('cmd') to be
        executed and might require setting the proper environment
variables ('env') in order to run it properly.
'''
self.cmd = cmd
self.env = env
# Translating cmd/env into C arguments through ctypes
argv_type = c_char_p * len(cmd)
argv = argv_type(*cmd)
argc = c_int(len(cmd))
if (env == None):
envp = None
else:
envp_type = c_char_p * len(env)
envp = envp_type(*env)
# Getting the subprocess pointer
rlimit.rlimit_subprocess_create.restype = POINTER(SUBPROCESS)
self.subprocess = \
rlimit.rlimit_subprocess_create (argc, argv, envp)
def run(self, timeout=None, memory=None):
'''Non-blocking execution.
The subprocess will be run in a separate thread. This function
do not return anything but might throw an exception if a
problem occurs at start time. The user might set a limit over
the maximum time and memory for the subprocess to run.
'''
if not (timeout == None):
rlimit.rlimit_set_time_limit(self.subprocess, timeout)
if not (memory == None):
rlimit.rlimit_set_memory_limit(self.subprocess, memory)
rlimit.rlimit_subprocess_run(self.subprocess)
def kill(self):
'''Kill the process.'''
if (rlimit.rlimit_subprocess_kill(self.subprocess) == -1):
raise Exception("subprocess kill failed")
def suspend(self):
'''Suspend the process.'''
if (rlimit.rlimit_subprocess_suspend(self.subprocess) == -1):
raise Exception("subprocess suspend failed")
def resume(self):
'''Resume the process.'''
if (rlimit.rlimit_subprocess_resume(self.subprocess) == -1):
raise Exception("subprocess resume failed")
def wait(self):
'''Wait for the end of the execution.
This command wait for the subprocess to end and returns with
the subprocess return code.
'''
return rlimit.rlimit_subprocess_wait(self.subprocess)
def write(self, msg):
'''Write to the stdin of the subprocess.
Write 'msg' to the stdin of the subprocess. Note that you need
to be sure that the subprocess wait for input.
'''
        rlimit.rlimit_write_stdin(self.subprocess, c_char_p(msg))
def expect(self, pattern, stdout=True, stderr=False, timeout=None):
'''Search the given pattern at the end of the given output
(default: stdout) and wait until the timeout elapsed if the
pattern is not found yet.
This command is intended to ease the interactive communication
        with the subprocess. It returns 'True' if the pattern has been
found and 'False' otherwise.
Example:
process.run()
process.expect('password:')
processs.write('mypassword')
if (process.expect('Welcome !\n%prompt> ')):
print('Success !')
elif (process.expect('Wrong password, try again!\npassword:'))
process.expect('myotherpassword')
else
print('Cannot log-in !')
'''
        if (timeout == None):
            timeout = 120
        if (stdout and stderr):
            return rlimit.rlimit_expect(self.subprocess, pattern, timeout)
        elif (stdout and not stderr):
            return rlimit.rlimit_expect_stdout(self.subprocess, pattern, timeout)
        elif (not stdout and stderr):
            return rlimit.rlimit_expect_stderr(self.subprocess, pattern, timeout)
        return False
def status(self):
if (self.subprocess.contents.status == 0):
return "Ready"
elif (self.subprocess.contents.status == 1):
return "Running"
elif (self.subprocess.contents.status == 2):
return "Sleeping"
elif (self.subprocess.contents.status == 3):
return "Stopped"
elif (self.subprocess.contents.status == 4):
return "Zombie"
elif (self.subprocess.contents.status == 5):
return "Terminated"
elif (self.subprocess.contents.status == 6):
return "Killed"
elif (self.subprocess.contents.status == 7):
return "Timeout"
elif (self.subprocess.contents.status == 8):
return "Memoryout"
elif (self.subprocess.contents.status == 9):
return "FsizeExceed"
elif (self.subprocess.contents.status == 10):
return "FDExceed"
elif (self.subprocess.contents.status == 11):
return "ProcExceed"
elif (self.subprocess.contents.status == 12):
return "DeniedSyscall"
def stdout(self):
return self.subprocess.contents.stdout_buffer
def stderr(self):
return self.subprocess.contents.stderr_buffer
def returnvalue(self):
return self.subprocess.contents.retval
def time_profile(self):
return \
(self.subprocess.contents.real_time_usec,
self.subprocess.contents.user_time_usec,
self.subprocess.contents.sys_time_usec)
def memory_profile(self):
return self.subprocess.contents.memory_kbytes
| [
"[email protected]"
] | |
ce87752de022016b146ac72edb19df40cb67fffe | 9d2b3f427d9ae37f195120246c243ac5eff2e2f9 | /test/box_memcached/binary-get.test | c63eaaa72e009dfb8609daed2deb0fd465f6976d | [
"BSD-2-Clause"
] | permissive | zcoder/tarantool | 12e64c670ce34524cd5b56a9cc9ee0703bc7c94e | 74253ce7369e5571646ce47b47f72412c5de6cad | refs/heads/master | 2020-12-24T21:37:00.570745 | 2011-12-25T19:22:04 | 2011-12-25T19:22:04 | 3,051,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 441 | test | # encoding: tarantool
blobs_list = [ "mooo\0", "mumble\0\0\0\0\r\rblarg", "\0", "\r" ]
for i in range(len(blobs_list)):
key = "foo_%d" % i
blob = blobs_list[i]
blob_len = len(blob)
print "len is %d" % blob_len
exec memcached "set %s 0 0 %d\r\n%s\r\n" % (key, blob_len, blob)
exec memcached "get %s\r\n" % key
# restore default suite config
server.stop()
server.deploy(self.suite_ini["config"])
# vim: syntax=python
| [
"[email protected]"
] | |
1cdf3137fe48ac43bc88dc2e6ac2e9c61263ca45 | 7c7236aa95ebebe241f04b98d55f3033b19dadc2 | /locomotivelist/locomotivelist/settings.py | e41c705b2a66d10fc03ced8af8349dda3a0be106 | [] | no_license | taisuo/cms | 57d792bb47d85bf6a4a39558a1bc34457a355c26 | dd8baa834d6426a2ce7406ea0b74eab252ef7789 | refs/heads/master | 2020-08-03T13:49:44.754754 | 2019-10-08T04:00:21 | 2019-10-08T04:00:21 | 211,765,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | """
Django settings for locomotivelist project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ntj5%=$^r$3g@_(o)^-qv%s-6gpo^2(4iz4pzw+=g%zbd-vt*u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locomotivelist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locomotivelist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
fbe4dadc65e9001e0e915766959cfef2e6a1bd71 | d824c0fdc3249c191aa4fba9ef4d750e3a2262a3 | /plot_normal.py | 7b6b1992e472d330520533e2af2b0720e078b5c3 | [] | no_license | PanYicheng/nest_bpstdp | 877d0b98f2353ddbf2453bdfcb3a403efa07fda2 | c74d0312ef2bc106eba41d4e7f2f84a334fc9db6 | refs/heads/master | 2020-03-30T19:58:20.080885 | 2018-10-04T12:27:18 | 2018-10-04T12:38:47 | 151,567,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import math
import pylab as pl
import numpy as np
def gd(x,m,s):
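    # Gaussian probability density with mean m and standard deviation s.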
left=1/(math.sqrt(2*math.pi)*s)
right=math.exp(-math.pow(x-m,2)/(2*math.pow(s,2)))
return left*right
def showfigure():
x=np.arange(-200,200,1)
y=[]
for i in x:
y.append(gd(i,100.0,50.0))
pl.plot(x,y)
pl.xlim(-200,200)
# pl.ylim(-0.2,0.5)
#
ax = pl.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))
#add param
label_f1 = "$\mu=10.0,\ \sigma=10.0$"
pl.text(2.5,0.3,label_f1,fontsize=15,verticalalignment="top",
horizontalalignment="left")
label_f2 = r"$f(x)=\frac{1}{\sqrt{2\pi}\sigma}exp(-\frac{(x-\mu)^2}{2\sigma^2})$"
pl.text(1.5,0.4,label_f2,fontsize=15,verticalalignment="top"
,horizontalalignment="left")
pl.show()
showfigure() | [
"[email protected]"
] | |
f9431e18242fc2153449ebaaa99257659244f53a | 5d14ae10f438f0bb463473d98a7d8af4990c7e5f | /TU05 If Statements.py | e57ea692c7016e2819eb8aac302cfd43adb06ae7 | [] | no_license | TigerAng0/Python-Challanges | 9a0e48dfb00d7046b24e4c84aac885015e9fe571 | d2a6748cdcde430b928f577b0d89388884122a41 | refs/heads/master | 2021-01-12T16:08:54.381000 | 2016-11-09T13:48:57 | 2016-11-09T13:48:57 | 71,951,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | awnser=input("Is it raining today (y/n)-->").upper()
if awnser in ["Y","YES"]:
print("Play some computer games.")
else:
print("Play some football or computer games if you're lazy.")
| [
"[email protected]"
] | |
a30678f2abf733db6f5910c618047274cdc34669 | e75c7be93388cd7b03310c046d70d680d20fcbf8 | /tweets/management/commands/benchmark.py | 75a8d5f667192b5f739a086c612a3e93af4a5499 | [] | no_license | mikeln/twissandra | 3086239c16999f77cab90e0c88718ba8e41860a5 | 68b7f0e429531fc480d045264a0be2ea501ae456 | refs/heads/master | 2020-04-05T06:18:43.150942 | 2016-12-05T22:35:59 | 2016-12-05T22:35:59 | 30,832,868 | 0 | 0 | null | 2015-02-15T16:18:31 | 2015-02-15T16:18:30 | Python | UTF-8 | Python | false | false | 1,641 | py | import datetime
import random
import string
import time
import uuid
import cass
from django.core.management.base import BaseCommand
#
# based on fake_data.py
#
class Command(BaseCommand):
def handle(self, *args, **options):
        # Oldest account is 10 years old
        origin = int(
            (time.time() -
             datetime.timedelta(days=365.25 * 10).total_seconds()) * 1e6)
now = int(time.time() * 1e6)
if len(args) < 2:
print " "
print "Usage benchmard <num_users> <max_tweets>"
print " Inserts new <num_users> users * new <max_tweets> tweets"
print " "
sys.exit(1)
num_users = int(args[0])
max_tweets = int(args[1])
the_ucount = 0
the_tweet_total = 0
print "users:",num_users," tw:",max_tweets
for i in range(num_users):
username = self.get_random_string()
cass.save_user(username, self.get_random_string())
creation_date = random.randint(origin, now)
the_tcount = 0
loop_tweets = max_tweets
for _ in range(loop_tweets):
cass.save_tweet(uuid.uuid1(), username, self.get_tweet(), timestamp=random.randint(creation_date, now))
the_tcount += 1
the_tweet_total += the_tcount
the_ucount += 1
print "created user", the_ucount, " tweets:",the_tcount, " total:",the_tweet_total
def get_tweet(self):
return ''.join(random.sample((string.letters)*5, 80))
def get_random_string(self):
return ''.join(random.sample(string.letters, 10))
| [
"[email protected]"
] | |
25da2e7dec7936944258fe5be4d9694ca7b50b71 | ddddd14f7f21c4b1e3a922d9489e918542323284 | /gallery/file_modules/gif.py | 4ceed7f3cb0772e67d3a7e74def407b5eef454c1 | [
"MIT"
] | permissive | sgreene570/gallery | 72e0bc4fcd165243f4774e49e07050f6eb804a32 | 19e521d7100efc6f7e6565444219ef2a9e2aa842 | refs/heads/develop | 2021-01-01T16:07:35.824634 | 2017-07-11T04:42:50 | 2017-07-11T18:25:05 | 89,010,522 | 0 | 0 | null | 2017-04-21T18:01:56 | 2017-04-21T18:01:56 | null | UTF-8 | Python | false | false | 343 | py | import os
import piexif
from wand.image import Image
from wand.color import Color
from gallery.file_modules import FileModule
from gallery.util import hash_file
class GIFFile(FileModule):
def __init__(self, file_path):
FileModule.__init__(self, file_path)
self.mime_type = "image/gif"
self.generate_thumbnail()
| [
"[email protected]"
] | |
8d3e3eae1a75074cc9ea1fc0e5c67acf8a69c01a | 0a047a804d3808d32e812ed5127e6c1b3fdf1c06 | /Pytorch/MNIST_DCGAN.py | 07504a665f7cec03fab7cab5ca9791ce3f6cfd54 | [] | no_license | leejeyeol/LJY_Machine_Learning | 17fccb609b772e37924d1102e25c2246f34086f6 | 456fe6e5cf5b054cf2bb7cd9d6cca7f031dcd033 | refs/heads/master | 2021-06-08T20:33:43.686471 | 2020-05-05T14:12:07 | 2020-05-05T14:12:07 | 89,432,146 | 4 | 1 | null | 2018-06-26T01:34:17 | 2017-04-26T03:09:52 | Python | UTF-8 | Python | false | false | 5,646 | py | import torch.nn as nn
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torchvision.utils as vutils
import torch.optim as optim
device = torch.device("cuda:0")
criterion = nn.BCELoss()
epochs = 5
batch_size = 100
nz = 100
ngf = 64
ndf = 64
def show_generated_data(real_data, fake_data):
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_data[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))
# Plot the fake images from the last epoch
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(vutils.make_grid(fake_data.detach()[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))
plt.show()
# MNIST call and load
dataloader = torch.utils.data.DataLoader(
dset.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])),
batch_size=batch_size, shuffle=True)
def weights_init(m):
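    # DCGAN init scheme: conv weights ~ N(0, 0.02); batchnorm scale ~ N(1, 0.02), bias 0.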
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Net_generator(nn.Module):
def __init__(self, nz, ngf):
super(Net_generator, self).__init__()
self.nz = nz
self.ngf = ngf
self.generator = nn.Sequential(
nn.ConvTranspose2d(in_channels=self.nz, out_channels=self.ngf*4, kernel_size=5,stride=1,padding=1),
nn.BatchNorm2d(self.ngf*4),
nn.ReLU(True),
nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 5, 1, 1),
nn.BatchNorm2d(self.ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(self.ngf * 2, self.ngf, 5, 2, 0),
nn.BatchNorm2d(self.ngf),
nn.ReLU(True),
nn.ConvTranspose2d(self.ngf, 1, 4, 2, 0),
nn.Tanh()
)
def forward(self, input):
output = self.generator(input)
return output
net_generator = Net_generator(nz, ngf).to(device)
net_generator.apply(weights_init)
class Net_Discriminator(nn.Module):
def __init__(self, ndf):
super(Net_Discriminator, self).__init__()
self.ndf = ndf
self.discriminator = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=self.ndf, kernel_size=5, stride=2, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(self.ndf, self.ndf*2, 4, 2, 1),
nn.BatchNorm2d(self.ndf*2),
nn.LeakyReLU(0.2),
nn.Conv2d(self.ndf*2, self.ndf*4, 4, 2, 1),
nn.BatchNorm2d(self.ndf * 4),
nn.LeakyReLU(0.2),
nn.Conv2d(self.ndf*4,1,3,1,0),
nn.Sigmoid()
)
def forward(self, input):
output = self.discriminator(input)
return output
net_discriminator = Net_Discriminator(ndf).to(device)
net_discriminator.apply(weights_init)
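# Note: the conv/deconv shapes above are sized for 28x28 single-channel MNIST images.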
#======================================================================================================================
# Training
#======================================================================================================================
test_noise = torch.FloatTensor(batch_size, nz, 1, 1).normal_(0, 1).to(device)
label = torch.FloatTensor(batch_size).to(device)
real_label = 1
fake_label = 0
# setup optimizer
optimizerD = optim.Adam(net_discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerG = optim.Adam(net_generator.parameters(), lr=0.001, betas=(0.5, 0.999))
G_losses = []
D_losses = []
for epoch in range(epochs):
for i, (real_data, _) in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
real_data = real_data.to(device)
label.data.fill_(real_label)
        real_output = net_discriminator(real_data).view(-1)
errD_real = criterion(real_output, label)
net_discriminator.zero_grad()
errD_real.backward()
# train with fake
noise = torch.FloatTensor(batch_size, nz, 1, 1).normal_(0, 1).to(device)
fake_data = net_generator(noise)
label.data.fill_(fake_label)
        fake_output = net_discriminator(fake_data.detach()).view(-1)
errD_fake = criterion(fake_output, label)
errD_fake.backward()
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
net_generator.zero_grad()
label.data.fill_(real_label) # fake labels are real for generator cost
fake_output = net_discriminator(fake_data)
errG = criterion(fake_output, label)
errG.backward()
optimizerG.step()
G_losses.append(errG.item())
        D_losses.append((errD_real + errD_fake).item())
if i %100 == 0 :
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f '
% (epoch, epochs, i, len(dataloader),
(errD_real + errD_fake).item(), errG.item()))
show_generated_data(real_data,fake_data)
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
| [
"[email protected]"
] | |
965326fd49974bd5a96390ae1e08da6e071e0ecd | 838fd8d80508d61a77b04ec98b69fe39ae5bbec1 | /usermanagement/forms.py | 5afa8233d7353ecbe7aa6f4590733d32c12bc09c | [] | no_license | billgsm/agenda | 8dfee6a86557bfc3cdef33155cf72d4075b0aef4 | 6f6d565562ec7690ee20e9ff62e4185b23416f78 | refs/heads/master | 2021-05-26T14:36:16.308763 | 2013-12-12T15:57:57 | 2013-12-12T15:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
#from django.contrib.auth.hashers import MAXIMUM_PASSWORD_LENGTH
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
class UserCreateForm(UserCreationForm):
#email = forms.EmailField(required=True)
#password1 = forms.CharField(label=_("Password"),
# widget=forms.PasswordInput, max_length=MAXIMUM_PASSWORD_LENGTH)
#password2 = forms.CharField(label=_("Password confirmation"),
# widget=forms.PasswordInput,
# max_length=MAXIMUM_PASSWORD_LENGTH,
# help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ('username', 'email',)
#def save(self, commit=True):
# user = super(UserCreateForm, self).save(commit=True)
# user.email = self.cleaned_data['email']
# if commit:
# user.save()
# return user
| [
"[email protected]"
] | |
304a1cd3b71b376d3f7964d58d6d894df67b469c | bd802f8f34489a092550d749ea4444c6052d9875 | /python/other/Recommender_spark_1.py | fb3b66aefa4be06ee3929a9ab86dc2377af3aa6d | [] | no_license | amirnasri/Spark_movie_recommender | 610e4291aefad9081d48b6ec37b3e5d42ac50981 | bcb48c8d0f4783c7b85bbec199feff0763c2c764 | refs/heads/master | 2021-06-24T16:25:16.471139 | 2018-08-09T05:00:16 | 2018-08-09T05:00:16 | 101,361,446 | 0 | 1 | null | 2020-07-23T14:57:02 | 2017-08-25T03:18:34 | Jupyter Notebook | UTF-8 | Python | false | false | 4,648 | py | import pandas as pd
import numpy as np
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
import os
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize
import pickle
import sys
ratings = pd.read_csv('data/ml-100k/u.data', delimiter="\t", engine="python", header=None)
ratings.columns = ["UserID::MovieID::Rating::Timestamp".split("::")]
# Load movie table
movies = pd.read_csv('data/ml-100k/u.item', delimiter='|', engine='python', header=None)
# Movie table columns as provided in the ReadMe file
columns = ' MovieID | movie title | release date | video release date |' \
'IMDb URL | unknown | Action | Adventure | Animation |'\
'Children | Comedy | Crime | Documentary | Drama | Fantasy |'\
'Film-Noir | Horror | Musical | Mystery | Romance | Sci-Fi |'\
'Thriller | War | Western'.split('|')
movies.columns = ["-".join(i.strip().split()) for i in columns]
movies.head()
print "The following movie id's are missing from movie table"
print sorted(set(range(1, movies.MovieID.max())) - set(movies.MovieID))
print "\nnumber of unique movies: %s\n" % len(set(movies.MovieID))
# In addition to the id's missing from the table above, some movie id's
# in the movie table are never rated by any user.
mi = ratings['MovieID'].unique()
mi.sort()
print "The following movie id's exist in movie table are not rate by any user"
print sorted(set(movies.MovieID) - set(mi))
print len(mi)
# movie-ID: id's provided in the movie table
# movie-index: index ranges from 0 to #(unique movies) - 1
movie_index_to_ID = dict(zip(range(len(mi)), mi))
movie_ID_to_index = {k: v for v, k in movie_index_to_ID.iteritems()}
# Setting up spark session and spark context
spark = SparkSession \
.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel("WARN")
# TODO: Should use broadcast for movie_ID_to_index?
def parse_data(line):
fields = line.split("\t")
user_id = int(fields[0])
movie_id = int(fields[1])
rating = float(fields[2])
ts = fields[3]
return movie_ID_to_index_bc.value[movie_id], user_id - 1, rating, ts
movie_ID_to_index_bc = sc.broadcast(movie_ID_to_index)
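# Broadcast the id->index map once so executors reuse it instead of shipping it with every task.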
# u_data_file = os.path.join("ml-1m", "ratings.dat")
u_data_file = os.path.join("data/ml-100k", "u.data")
ratings_rdd = sc.textFile(u_data_file).map(parse_data) # .cache().filter(lambda x: x is not None)
print movie_ID_to_index.items()[:10]
ratings_columns = "UserID::MovieID::Rating::Timestamp".split("::")
ratings_sp = spark.createDataFrame(ratings_rdd, schema=ratings_columns)
model = ALS.train(ratings_sp.select(['UserID', 'MovieID', 'Rating']), rank = 10, iterations = 10)
def row_style_to_coordinate(m):
"""
Change from row-style to coordinate style matrix:
m = [
(0, (v00, v01, v02))
(2, (v20, v21, v22))
(5, (v50, v51, v52))
]
=>
[
(0, [(0, v00), (1, v01), (2, v02)])
(2, [(0, v20), (1, v21), (2, v22)])
(5, [(0, v50), (1, v51), (2, v52)])
]
=>
[
(0, 0, v00), (0, 1, v01), (0, 2, v02),
(2, 0, v20), (2, 1, v21), (2, 2, v22),
]
"""
x = m.map(lambda r: (r[0], zip(range(len(r[1])), r[1])))
return x.flatMap(lambda r: [(r[0], i[0], i[1]) for i in r[1]])
def coordinate_to_sparse(m):
row, col, data = np.array(m).T
return csr_matrix((data, (row, col)))
pf_rdd = model.productFeatures()
uf_rdd = model.userFeatures()
user_features = row_style_to_coordinate(pf_rdd)
product_features = row_style_to_coordinate(uf_rdd)
#print coordinate_to_sparse(user_features.collect()).todense().shape
#print coordinate_to_sparse(product_features.collect()).todense().shape
pf_sparse = coordinate_to_sparse(product_features.collect())
pf = np.array(pf_sparse.todense())
print pf
pf_norm = normalize(pf, axis=1)
pp_sim = np.dot(pf_norm, pf_norm.T)
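# Rows of pf_norm are unit length, so this dot product gives the cosine similarity
# between item factor vectors; row 0 below picks recommendations for movie index 0.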
recom_movie_index = np.argsort(pp_sim[0, :])[::-1][:10]
recom_movie_df = pd.merge(pd.DataFrame({'MovieID':[movie_index_to_ID[i] for i in recom_movie_index]}), movies, how='inner', on='MovieID', suffixes=('_x', '_y'))
print recom_movie_df
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
result_folder = './result'
if len(sys.argv) > 1:
result_folder = sys.argv[1]
if not os.path.exists(result_folder):
os.mkdir(result_folder)
curr_dir = os.getcwd()
os.chdir(result_folder)
movies.to_csv('movies_df.csv')
save_obj(movie_index_to_ID, 'movie_index_to_ID')
np.save('pp_sim', pp_sim)
os.chdir(curr_dir)
| [
"[email protected]"
] | |
7398929e6c6e4e9ba906b903cebc809dfcf312e9 | 3a6aa0296a0258000f91f9c096fe21612eb17cc6 | /src/initialization/httpexception_processor.py | d627f3d81dbe960f16d45c7c45a72f9faaa3ace4 | [] | no_license | chiwah-keen/flask-demo-pro | a20c8c271e39dd44136b0f10e861b2a34dccfbf4 | f9cfc56f352c1e0c89c7fa75e6394880d476958f | refs/heads/master | 2021-01-05T11:11:11.201817 | 2020-02-17T02:36:44 | 2020-02-17T02:36:44 | 241,004,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 2020年02月07日
@author: jianzhihua
"""
from flask_app import main_app
from initialization.logger_processsor import logger
import utils.custom_response as ucr
@main_app.errorhandler(404)
def S404Handle(error):
"""
    @attention: handler for 404 Not Found errors
"""
# msg,data = error.show()
return ucr.op_fail(msg='Resource Not Found Error!', data={}, status=404)
@main_app.errorhandler(405)
def S405Handle(error):
"""
    @attention: handler for 405 Method Not Allowed errors
"""
# msg,data = error.show()
return ucr.op_fail(msg='The method is not allowed for the requested URL!', data={}, status=405)
@main_app.errorhandler(500)
def S500Handle(error):
"""
    @attention: handler for 500 Internal Server Error responses
"""
# msg,data = error.show()
# return ucr.op_fail(msg='System Error!', data={}, status=500)
return ucr.op_fail(msg='System Error!', data={}, status=500)
| [
"[email protected]"
] | |
7734a21170fb4559398072c9c7cc5a5b15ae2302 | e5ff7b465ad359685dc0ff4d4cf8026922810f4b | /poscar_to_cif.py | efcf1d53aa2e0d91dca6ae4a95b152c5b7bd3f85 | [
"MIT"
] | permissive | runxin123/mg_intermetallic_ml | dee5082485873a4ea016255e8eee4eee38bff417 | 9ce496144123442a06c154496fe8695629d5effe | refs/heads/main | 2023-04-28T13:32:23.383737 | 2021-05-24T03:47:47 | 2021-05-24T03:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from pymatgen.io.cif import CifWriter
from pymatgen.core.structure import Structure
import os
structs = [i for i in os.listdir() if i[:6] == 'POSCAR']
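# Convert every POSCAR* file in the current directory to CIF, then remove the original.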
for s in structs:
CifWriter(Structure.from_file(s)).write_file(f'{s}.cif')
os.remove(s)
| [
"[email protected]"
] | |
31a865b320a6c846e8c56d539938bb9983d0761e | 32deed55475741abffcecb982f98474a6541aa29 | /RI/lib/custom_analyzer.py | c85362ff6f19bf3ea0d2c1d63985677a8c360ddd | [] | no_license | vlad-doru/master | e18275cd5cc3e54f40bb330340c6c790ce3ebd00 | 09574cfe25ec5245205aaeeedca0c1d9eab8e13c | refs/heads/master | 2021-01-17T09:39:49.539382 | 2016-05-07T12:28:28 | 2016-05-07T12:28:28 | 25,204,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | import lucene
import textract
from lucene.collections import *
from org.tartarus.snowball.ext import RomanianStemmer
from org.apache.pylucene.analysis import PythonAnalyzer
from org.apache.lucene.analysis.core import LowerCaseTokenizer, StopFilter, StopAnalyzer
from org.apache.lucene.analysis.standard import StandardTokenizer
from org.apache.lucene.analysis.standard import StandardFilter
from org.apache.lucene.analysis.snowball import SnowballFilter
from org.apache.lucene.analysis.miscellaneous import ASCIIFoldingFilter
from java.util import HashSet, ArrayList, Arrays
from org.apache.lucene.analysis.util import CharArraySet
from org.apache.lucene.analysis import Analyzer
class CustomRomanianAnalyzer(PythonAnalyzer):
def __init__(self, stopwords_file=""):
super(CustomRomanianAnalyzer, self).__init__()
stopwords = []
if len(stopwords_file) > 0:
stopwords = textract.process(stopwords_file).split()
self.__stopwords = StopFilter.makeStopSet(Arrays.asList(stopwords))
def createComponents(self, field, reader):
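        # Analysis chain: lowercase tokenization -> standard filtering -> stop-word
        # removal -> ASCII folding -> Romanian Snowball stemming.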
tokenizer = LowerCaseTokenizer(reader)
filter = StandardFilter(tokenizer)
filter = StopFilter(filter, self.__stopwords)
filter = ASCIIFoldingFilter(filter)
filter = SnowballFilter(filter, RomanianStemmer())
return Analyzer.TokenStreamComponents(tokenizer, filter)
| [
"[email protected]"
] | |
250c04aa763f71681cae0386ca25309d7883ce8c | 1946d8d6e26b3d6b2573718d77875a554619c05a | /config/settings/environments/production.py | 6de418bddc21e06fbdeebf1e2485769b09b6d4b5 | [] | no_license | hound672/django-blank | f850079a15d21c415fc5120a51daebb0d4d6c297 | 8784db1f4ef1d85f78734b32e465805cf04f50f8 | refs/heads/master | 2022-06-15T21:39:23.516875 | 2019-06-22T15:33:18 | 2019-06-22T15:33:18 | 193,249,674 | 0 | 0 | null | 2022-05-25T02:50:45 | 2019-06-22T15:32:25 | Python | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
"""
settings/prod
~~~~~~~~~~~~~~~
Настройки для продакшн сервера
"""
# noinspection PyUnresolvedReferences
from config.settings.components.common import *
ALLOWED_HOSTS = ['']
| [
"[email protected]"
] | |
e0e8d831007d77131f6d88f8e73286c498bce730 | 186b7e8d1a07d2b5eb4c93f11279c59422485327 | /src/common/urls.py | 6c90d9eecacf74f2257984652d4f935ff44f3f4f | [] | no_license | nevtum/hivemind | c47080adcbcb7a7df8a9965aea084294fcfecfe9 | 04c0aa06a8a00b98ccb631a6eeaaa1371b0af8dc | refs/heads/master | 2020-04-11T10:25:49.874601 | 2018-12-14T00:45:03 | 2018-12-14T00:45:03 | 161,714,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required as auth
from .views import CreateProjectView, EditProjectView, ProjectListView
urlpatterns = [
url(r'^projects/$', auth(ProjectListView.as_view()), name='projects'),
url(r'^projects/new/$', auth(CreateProjectView.as_view()), name='create-project'),
url(r'^projects/(?P<slug>[-\w\d]+)/$', auth(EditProjectView.as_view()), name='edit-project'),
]
| [
"[email protected]"
] | |
c49a46ac2549ed50a593ddc7f2d560176aae8fc4 | f24e91b1a9565b0d71a7e2dfffb49efbd759f5f1 | /yare-rl/train.py | 282f25fb4189e37e291e78a84a5457ecc20be63f | [
"MIT"
] | permissive | dpmaloney/yare-rl | aeb99a85635210b83b36ef8a99dcbe2c2f32ddf5 | be16b3868957f7998baa4f0f568e6478787985e0 | refs/heads/main | 2023-07-11T08:39:23.629595 | 2021-08-20T04:14:46 | 2021-08-20T04:14:46 | 398,152,132 | 0 | 0 | MIT | 2021-08-20T04:14:32 | 2021-08-20T04:14:31 | null | UTF-8 | Python | false | false | 1,136 | py | import argparse
import gym
from pettingzoo.butterfly import knights_archers_zombies_v7
from env import YareEnv
from policies import RandomPolicy
def random_baseline(env: gym.Env) -> None:
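    """Roll out episodes forever, sampling uniformly random actions for every agent."""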
policy = RandomPolicy(env.action_spaces)
while True:
observations = env.reset()
max_steps: int = 1000
for _ in range(max_steps):
actions = {
agent: policy.get_action(
observations[agent],
agent) for agent in env.agents}
observations, rewards, dones, infos = env.step(actions)
env.render()
if all(dones):
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--env',
default="yare",
choices=[
"yare",
"butterfly"],
help='Environment to train in.')
args = parser.parse_args()
if args.env == "yare":
env = YareEnv()
elif args.env == "butterfly":
env = knights_archers_zombies_v7.parallel_env()
else:
raise NotImplementedError
random_baseline(env)
| [
"[email protected]"
] | |
a7fbdb3937b39dc612763e233578b4e0abfbbeb1 | 8c1392b2ec81abcfbb18b922c7010089b973f160 | /5.9.py | 2f882431b9fb4341e3dd16ae02bea2d43b967148 | [] | no_license | bmk316/daniel_liang_python_solutions | f714b9d32a57fc9157b1954d2c608952a169c399 | 02b4879caa137d3dcb33be517e4ad513398df49e | refs/heads/main | 2023-03-07T13:47:20.205978 | 2021-02-17T23:20:37 | 2021-02-17T23:20:37 | 339,880,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #Financial application : compute future tuition
tuition = 10000
rate = 0.05
four_years = 0.0
for i in range(14):
tuition += tuition * 0.05
if i == 9:
print(f"Tuition in 10 years is: {tuition :,.2f}")
if i == 10 or i == 11 or i == 12 or i == 13:
four_years += tuition
print(f"Total cost of 4 years of tuition, starting 10 years from now is: {four_years :,.2f}")
| [
"[email protected]"
] | |
f129481598f87ce5d355abcd28ab70d637b00f93 | 0d935bbb55e54e95c15bc76dc1c2333296537c3b | /platocdp/devoops/setuphandlers.py | fd18e5a3b072b9f0f241e556277351a6959ceaeb | [] | no_license | koslab/platocdp.devoops | 1c312ceeb9f684f0a22cc1568afcaf15f2a144e7 | 4bcdf1b5dbce4cda2d2ed4963b7100daa4f89321 | refs/heads/master | 2016-08-04T23:52:13.234017 | 2015-07-02T17:04:54 | 2015-07-02T17:04:54 | 34,948,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from collective.grok import gs
from platocdp.devoops import MessageFactory as _
@gs.importstep(
name=u'platocdp.devoops',
title=_('platocdp.devoops import handler'),
description=_(''))
def setupVarious(context):
if context.readDataFile('platocdp.devoops.marker.txt') is None:
return
portal = context.getSite()
# do anything here
| [
"[email protected]"
] | |
521d709c5b4a1384fc725ce63d6172eec2fead78 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/common/Lib/distutils/tests/test_build_ext.py | 1d8d27db4f7a5dadb809a807760ccda505ba62bf | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 16,296 | py | # 2016.11.19 19:58:33 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/distutils/tests/test_build_ext.py
import sys
import os
from StringIO import StringIO
import textwrap
from distutils.core import Extension, Distribution
from distutils.command.build_ext import build_ext
from distutils import sysconfig
from distutils.tests import support
from distutils.errors import DistutilsSetupError, CompileError, DistutilsPlatformError
import unittest
from test import test_support
ALREADY_TESTED = False
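# Remembers which test class already built and imported the xx module,
# so later test classes skip the expensive rebuild.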
class BuildExtTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase):
def setUp(self):
super(BuildExtTestCase, self).setUp()
self.tmp_dir = self.mkdtemp()
self.xx_created = False
sys.path.append(self.tmp_dir)
self.addCleanup(sys.path.remove, self.tmp_dir)
if sys.version > '2.6':
import site
self.old_user_base = site.USER_BASE
site.USER_BASE = self.mkdtemp()
from distutils.command import build_ext
build_ext.USER_BASE = site.USER_BASE
def tearDown(self):
if self.xx_created:
test_support.unload('xx')
super(BuildExtTestCase, self).tearDown()
def test_build_ext(self):
global ALREADY_TESTED
support.copy_xxmodule_c(self.tmp_dir)
self.xx_created = True
xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
xx_ext = Extension('xx', [xx_c])
dist = Distribution({'name': 'xx',
'ext_modules': [xx_ext]})
dist.package_dir = self.tmp_dir
cmd = build_ext(dist)
support.fixup_build_ext(cmd)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
old_stdout = sys.stdout
if not test_support.verbose:
sys.stdout = StringIO()
try:
cmd.ensure_finalized()
cmd.run()
finally:
sys.stdout = old_stdout
if ALREADY_TESTED:
self.skipTest('Already tested in %s' % ALREADY_TESTED)
else:
ALREADY_TESTED = type(self).__name__
import xx
for attr in ('error', 'foo', 'new', 'roj'):
self.assertTrue(hasattr(xx, attr))
self.assertEqual(xx.foo(2, 5), 7)
self.assertEqual(xx.foo(13, 15), 28)
self.assertEqual(xx.new().demo(), None)
if test_support.HAVE_DOCSTRINGS:
doc = 'This is a template module just for instruction.'
self.assertEqual(xx.__doc__, doc)
self.assertIsInstance(xx.Null(), xx.Null)
self.assertIsInstance(xx.Str(), xx.Str)
return
def test_solaris_enable_shared(self):
dist = Distribution({'name': 'xx'})
cmd = build_ext(dist)
old = sys.platform
sys.platform = 'sunos'
from distutils.sysconfig import _config_vars
old_var = _config_vars.get('Py_ENABLE_SHARED')
_config_vars['Py_ENABLE_SHARED'] = 1
try:
cmd.ensure_finalized()
finally:
sys.platform = old
if old_var is None:
del _config_vars['Py_ENABLE_SHARED']
else:
_config_vars['Py_ENABLE_SHARED'] = old_var
self.assertGreater(len(cmd.library_dirs), 0)
return
@unittest.skipIf(sys.version < '2.6', 'site.USER_SITE was introduced in 2.6')
def test_user_site(self):
import site
dist = Distribution({'name': 'xx'})
cmd = build_ext(dist)
options = [ name for name, short, label in cmd.user_options ]
self.assertIn('user', options)
cmd.user = 1
lib = os.path.join(site.USER_BASE, 'lib')
incl = os.path.join(site.USER_BASE, 'include')
os.mkdir(lib)
os.mkdir(incl)
cmd.ensure_finalized()
self.assertIn(lib, cmd.library_dirs)
self.assertIn(lib, cmd.rpath)
self.assertIn(incl, cmd.include_dirs)
def test_finalize_options(self):
modules = [Extension('foo', ['xxx'])]
dist = Distribution({'name': 'xx',
'ext_modules': modules})
cmd = build_ext(dist)
cmd.finalize_options()
py_include = sysconfig.get_python_inc()
self.assertIn(py_include, cmd.include_dirs)
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
self.assertIn(plat_py_include, cmd.include_dirs)
cmd = build_ext(dist)
cmd.libraries = 'my_lib, other_lib lastlib'
cmd.finalize_options()
self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib'])
cmd = build_ext(dist)
cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep
cmd.finalize_options()
self.assertIn('my_lib_dir', cmd.library_dirs)
self.assertIn('other_lib_dir', cmd.library_dirs)
cmd = build_ext(dist)
cmd.rpath = 'one%stwo' % os.pathsep
cmd.finalize_options()
self.assertEqual(cmd.rpath, ['one', 'two'])
cmd = build_ext(dist)
cmd.define = 'one,two'
cmd.finalize_options()
self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])
cmd = build_ext(dist)
cmd.undef = 'one,two'
cmd.finalize_options()
self.assertEqual(cmd.undef, ['one', 'two'])
cmd = build_ext(dist)
cmd.swig_opts = None
cmd.finalize_options()
self.assertEqual(cmd.swig_opts, [])
cmd = build_ext(dist)
cmd.swig_opts = '1 2'
cmd.finalize_options()
self.assertEqual(cmd.swig_opts, ['1', '2'])
return
def test_check_extensions_list(self):
dist = Distribution()
cmd = build_ext(dist)
cmd.finalize_options()
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, 'foo')
exts = [('bar', 'foo', 'bar'), 'foo']
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
exts = [('foo-bar', '')]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
exts = [('foo.bar', '')]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
exts = [('foo.bar', {'sources': [''],
'libraries': 'foo',
'some': 'bar'})]
cmd.check_extensions_list(exts)
ext = exts[0]
self.assertIsInstance(ext, Extension)
self.assertEqual(ext.libraries, 'foo')
self.assertFalse(hasattr(ext, 'some'))
exts = [('foo.bar', {'sources': [''],
'libraries': 'foo',
'some': 'bar',
'macros': [('1', '2', '3'), 'foo']})]
self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
exts[0][1]['macros'] = [('1', '2'), ('3',)]
cmd.check_extensions_list(exts)
self.assertEqual(exts[0].undef_macros, ['3'])
self.assertEqual(exts[0].define_macros, [('1', '2')])
def test_get_source_files(self):
modules = [Extension('foo', ['xxx'])]
dist = Distribution({'name': 'xx',
'ext_modules': modules})
cmd = build_ext(dist)
cmd.ensure_finalized()
self.assertEqual(cmd.get_source_files(), ['xxx'])
def test_compiler_option(self):
dist = Distribution()
cmd = build_ext(dist)
cmd.compiler = 'unix'
cmd.ensure_finalized()
cmd.run()
self.assertEqual(cmd.compiler, 'unix')
def test_get_outputs(self):
tmp_dir = self.mkdtemp()
c_file = os.path.join(tmp_dir, 'foo.c')
self.write_file(c_file, 'void initfoo(void) {};\n')
ext = Extension('foo', [c_file])
dist = Distribution({'name': 'xx',
'ext_modules': [ext]})
cmd = build_ext(dist)
support.fixup_build_ext(cmd)
cmd.ensure_finalized()
self.assertEqual(len(cmd.get_outputs()), 1)
cmd.build_lib = os.path.join(self.tmp_dir, 'build')
cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
cmd.inplace = 1
cmd.run()
so_file = cmd.get_outputs()[0]
finally:
os.chdir(old_wd)
self.assertTrue(os.path.exists(so_file))
self.assertEqual(os.path.splitext(so_file)[-1], sysconfig.get_config_var('SO'))
so_dir = os.path.dirname(so_file)
self.assertEqual(so_dir, other_tmp_dir)
cmd.compiler = None
cmd.inplace = 0
cmd.run()
so_file = cmd.get_outputs()[0]
self.assertTrue(os.path.exists(so_file))
self.assertEqual(os.path.splitext(so_file)[-1], sysconfig.get_config_var('SO'))
so_dir = os.path.dirname(so_file)
self.assertEqual(so_dir, cmd.build_lib)
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {'': 'bar'}
path = cmd.get_ext_fullpath('foo')
path = os.path.split(path)[0]
self.assertEqual(path, cmd.build_lib)
cmd.inplace = 1
other_tmp_dir = os.path.realpath(self.mkdtemp())
old_wd = os.getcwd()
os.chdir(other_tmp_dir)
try:
path = cmd.get_ext_fullpath('foo')
finally:
os.chdir(old_wd)
path = os.path.split(path)[0]
lastdir = os.path.split(path)[-1]
self.assertEqual(lastdir, 'bar')
return
def test_ext_fullpath(self):
ext = sysconfig.get_config_vars()['SO']
dist = Distribution()
cmd = build_ext(dist)
cmd.inplace = 1
cmd.distribution.package_dir = {'': 'src'}
cmd.distribution.packages = ['lxml', 'lxml.html']
curdir = os.getcwd()
wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
cmd.inplace = 0
cmd.build_lib = os.path.join(curdir, 'tmpdir')
wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
build_py = cmd.get_finalized_command('build_py')
build_py.package_dir = {}
cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner', 'portmap' + ext)
self.assertEqual(wanted, path)
cmd.inplace = 1
path = cmd.get_ext_fullpath('twisted.runner.portmap')
wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
self.assertEqual(wanted, path)
def test_build_ext_inplace(self):
etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
etree_ext = Extension('lxml.etree', [etree_c])
dist = Distribution({'name': 'lxml',
'ext_modules': [etree_ext]})
cmd = build_ext(dist)
cmd.ensure_finalized()
cmd.inplace = 1
cmd.distribution.package_dir = {'': 'src'}
cmd.distribution.packages = ['lxml', 'lxml.html']
curdir = os.getcwd()
ext = sysconfig.get_config_var('SO')
wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
def test_setuptools_compat(self):
import distutils.core, distutils.extension, distutils.command.build_ext
saved_ext = distutils.extension.Extension
try:
test_support.import_module('setuptools_build_ext', deprecated=True)
from setuptools_build_ext import build_ext as setuptools_build_ext
from setuptools_extension import Extension
etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
etree_ext = Extension('lxml.etree', [etree_c])
dist = Distribution({'name': 'lxml',
'ext_modules': [etree_ext]})
cmd = setuptools_build_ext(dist)
cmd.ensure_finalized()
cmd.inplace = 1
cmd.distribution.package_dir = {'': 'src'}
cmd.distribution.packages = ['lxml', 'lxml.html']
curdir = os.getcwd()
ext = sysconfig.get_config_var('SO')
wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
path = cmd.get_ext_fullpath('lxml.etree')
self.assertEqual(wanted, path)
finally:
distutils.extension.Extension = saved_ext
distutils.core.Extension = saved_ext
distutils.command.build_ext.Extension = saved_ext
def test_build_ext_path_with_os_sep(self):
dist = Distribution({'name': 'UpdateManager'})
cmd = build_ext(dist)
cmd.ensure_finalized()
ext = sysconfig.get_config_var('SO')
ext_name = os.path.join('UpdateManager', 'fdsend')
ext_path = cmd.get_ext_fullpath(ext_name)
wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext)
self.assertEqual(ext_path, wanted)
@unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
def test_build_ext_path_cross_platform(self):
dist = Distribution({'name': 'UpdateManager'})
cmd = build_ext(dist)
cmd.ensure_finalized()
ext = sysconfig.get_config_var('SO')
ext_name = 'UpdateManager/fdsend'
ext_path = cmd.get_ext_fullpath(ext_name)
wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext)
self.assertEqual(ext_path, wanted)
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_default(self):
self._try_compile_deployment_target('==', None)
return
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_too_low(self):
self.assertRaises(DistutilsPlatformError, self._try_compile_deployment_target, '>', '10.1')
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
def test_deployment_target_higher_ok(self):
deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if deptarget:
deptarget = [ int(x) for x in deptarget.split('.') ]
deptarget[-1] += 1
deptarget = '.'.join((str(i) for i in deptarget))
self._try_compile_deployment_target('<', deptarget)
def _try_compile_deployment_target(self, operator, target):
orig_environ = os.environ
os.environ = orig_environ.copy()
self.addCleanup(setattr, os, 'environ', orig_environ)
if target is None:
if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
del os.environ['MACOSX_DEPLOYMENT_TARGET']
else:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')
with open(deptarget_c, 'w') as fp:
fp.write(textwrap.dedent(' #include <AvailabilityMacros.h>\n\n int dummy;\n\n #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED\n #else\n #error "Unexpected target"\n #endif\n\n ' % operator))
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
target = tuple(map(int, target.split('.')))
target = '%02d%01d0' % target
deptarget_ext = Extension('deptarget', [deptarget_c], extra_compile_args=['-DTARGET=%s' % (target,)])
dist = Distribution({'name': 'deptarget',
'ext_modules': [deptarget_ext]})
dist.package_dir = self.tmp_dir
cmd = build_ext(dist)
cmd.build_lib = self.tmp_dir
cmd.build_temp = self.tmp_dir
try:
cmd.ensure_finalized()
cmd.run()
except CompileError:
self.fail('Wrong deployment target during compilation')
return
def test_suite():
return unittest.makeSuite(BuildExtTestCase)
if __name__ == '__main__':
test_support.run_unittest(test_suite())
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\distutils\tests\test_build_ext.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:58:34 Central Europe (standard time)
| [
"[email protected]"
] | |
72c8d67aaedfd2efae3a52f2fef35b40c901f3fc | 6cbb6f523070dfe1acd40ac5c1c576e157ac715b | /tests/app/cli/test_main.py | a0d3b9bd342f81dcd6d4e10cfd4b11720b16a34a | [
"MIT"
] | permissive | ymoch/preacher | 1607350c6b2ba50bb0e0d9ec3322e69f9ce2cb5b | ceeb048c6ed120102d9cdd6829ee2b05ad977c83 | refs/heads/master | 2023-02-22T21:00:06.791304 | 2022-06-14T17:21:34 | 2022-06-14T17:21:34 | 174,710,438 | 3 | 2 | MIT | 2023-02-16T05:56:52 | 2019-03-09T15:21:20 | Python | UTF-8 | Python | false | false | 5,976 | py | import os
from tempfile import TemporaryDirectory
from click.testing import CliRunner
from pytest import fixture, mark
from preacher.app.cli.executor import PROCESS_POOL_FACTORY, THREAD_POOL_FACTORY
from preacher.app.cli.main import main
from preacher.core.status import Status
PKG = "preacher.app.cli.main"
@fixture
def base_dir():
with TemporaryDirectory() as path:
with open(os.path.join(path, "foo.yml"), "w") as f:
f.write("foo")
with open(os.path.join(path, "bar.yml"), "w") as f:
f.write("bar")
with open(os.path.join(path, "plugin.py"), "w") as f:
f.write("bar")
os.makedirs(os.path.join(path, "dir"))
yield path
@mark.parametrize(
"args",
[
["-h"],
["--help"],
["--version"],
],
)
def test_show_and_exit(args):
result = CliRunner().invoke(main, args)
assert result.exit_code == 0
@mark.parametrize(
"args",
(
["-a", ""],
["--argument", "foo"],
["--argument", "foo=["],
["--argument", "foo=!include file.yml"],
["-l", "foo"],
["--level", "bar"],
["-r", "foo"],
["--retry", "-1"],
["-d", "foo"],
["--delay", "-0.1"],
["-t", "foo"],
["--timeout", "0.0"],
["-c", "foo"],
["--concurrency", "0"],
["-C", "foo"],
["--concurrent-executor", "foo"],
["-p", "invalid"],
["--plugin", "invalid"],
["dir"],
),
)
def test_given_invalid_options(args, base_dir):
runner = CliRunner()
result = runner.invoke(main, args)
assert result.exit_code == 2
@mark.parametrize(
"env",
(
{},
{
"PREACHER_CLI_BASE_URL": "",
"PREACHER_CLI_ARGUMENT": "",
"PREACHER_CLI_LEVEL": "",
"PREACHER_CLI_RETRY": "",
"PREACHER_CLI_DELAY": "",
"PREACHER_CLI_TIMEOUT": "",
"PREACHER_CLI_CONCURRENCY": "",
"PREACHER_CLI_CONCURRENT_EXECUTOR": "",
"PREACHER_CLI_PLUGIN": "",
},
),
)
def test_default(mocker, env):
app = mocker.patch(f"{PKG}.app", return_value=0)
result = CliRunner().invoke(main, env=env)
assert result.exit_code == 0
app.assert_called_once_with(
paths=(),
base_url="",
arguments={},
level=Status.SUCCESS,
report_dir=None,
retry=0,
delay=0.1,
timeout=None,
concurrency=1,
executor_factory=PROCESS_POOL_FACTORY,
plugins=(),
verbosity=0,
)
def test_arguments(mocker, base_dir):
app = mocker.patch(f"{PKG}.app", return_value=0)
args = (
"--base-url",
"https://your-domain.com/api",
"-a",
"foo=",
"--argument",
"bar=1",
"--argument",
"baz=1.2",
"--argument",
"spam=[ham,eggs]",
"--level",
"unstable",
"--report",
os.path.join(base_dir, "report"),
"--retry",
"5",
"--delay",
"2.5",
"--timeout",
"3.5",
"--concurrency",
"4",
"--executor",
"thread",
"-p",
os.path.join(base_dir, "plugin.py"),
"--plugin",
os.path.join(base_dir, "dir"),
"--verbose",
os.path.join(base_dir, "foo.yml"),
os.path.join(base_dir, "bar.yml"),
)
env = {
"PREACHER_CLI_BASE_URL": "https://my-domain.com/api",
"PREACHER_CLI_ARGUMENT": "foo",
"PREACHER_CLI_LEVEL": "foo",
"PREACHER_CLI_RETRY": "foo",
"PREACHER_CLI_DELAY": "foo",
"PREACHER_CLI_TIMEOUT": "foo",
"PREACHER_CLI_CONCURRENCY": "foo",
"PREACHER_CLI_CONCURRENT_EXECUTOR": "foo",
"PREACHER_CLI_PLUGIN": "foo",
}
result = CliRunner().invoke(main, args=args, env=env)
assert result.exit_code == 0
app.assert_called_once_with(
paths=(os.path.join(base_dir, "foo.yml"), os.path.join(base_dir, "bar.yml")),
base_url="https://your-domain.com/api",
arguments={"foo": None, "bar": 1, "baz": 1.2, "spam": ["ham", "eggs"]},
level=Status.UNSTABLE,
report_dir=os.path.join(base_dir, "report"),
retry=5,
delay=2.5,
timeout=3.5,
concurrency=4,
executor_factory=THREAD_POOL_FACTORY,
plugins=(os.path.join(base_dir, "plugin.py"), os.path.join(base_dir, "dir")),
verbosity=1,
)
def test_environ(mocker, base_dir):
app = mocker.patch(f"{PKG}.app", return_value=0)
env = {
"PREACHER_CLI_BASE_URL": "https://my-domain.com/api",
"PREACHER_CLI_ARGUMENT": 'foo=1 bar=" baz " spam="ham\'""eggs"',
"PREACHER_CLI_LEVEL": "failure",
"PREACHER_CLI_REPORT": "reports/",
"PREACHER_CLI_RETRY": "10",
"PREACHER_CLI_DELAY": "1.2",
"PREACHER_CLI_TIMEOUT": "3.4",
"PREACHER_CLI_CONCURRENCY": "5",
"PREACHER_CLI_CONCURRENT_EXECUTOR": "thread",
"PREACHER_CLI_PLUGIN": ":".join(
(
os.path.join(base_dir, "plugin.py"),
os.path.join(base_dir, "dir"),
)
),
}
result = CliRunner().invoke(main, env=env)
assert result.exit_code == 0
app.assert_called_once_with(
paths=(),
base_url="https://my-domain.com/api",
arguments={"foo": 1, "bar": "baz", "spam": "ham'eggs"},
level=Status.FAILURE,
report_dir="reports/",
retry=10,
delay=1.2,
timeout=3.4,
concurrency=5,
executor_factory=THREAD_POOL_FACTORY,
plugins=(os.path.join(base_dir, "plugin.py"), os.path.join(base_dir, "dir")),
verbosity=0,
)
@mark.parametrize("exit_code", list(range(-1, 5)))
def test_exit_code(mocker, exit_code):
mocker.patch(f"{PKG}.app", return_value=exit_code)
result = CliRunner().invoke(main)
assert result.exit_code == exit_code
| [
"[email protected]"
] | |
8a5ec785654093e76f90567eca5c8d6e49cdd8ae | c36f4bd95c7a0706129ac1ed6e1e29f3f13c12b9 | /m8/p2/assignment2.py | e5dc42dedc79539058c35a7c30e4dcabc145dd5f | [] | no_license | prathyushab14/cssp1__assignments | 7fdfe121a48a483942d8b13f0996aeeae18ed9c7 | c0f64d61092fc42dc598fbe966923e4e88af0f93 | refs/heads/master | 2020-03-25T01:57:02.643731 | 2018-08-26T14:32:14 | 2018-08-26T14:32:14 | 143,266,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | '''Sum of digits using recursion.'''
def sumofdigits(n_1):
'''
n is positive Integer
returns: a positive integer, the sum of digits of n.
'''
if n_1 == 0:
return 0
return n_1%10+sumofdigits(n_1//10)
def main():
'''Main Function.'''
a_1 = input()
print(sumofdigits(int(a_1)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d131d6d74796cfc319b71c0faeada452be95ec4b | 293045cb32c999d66ee54ce43c42b64036f611b8 | /sqlPiuTabelle2.py | aeff2e67c58ce88ee7fd128a7b111a622e18548b | [] | no_license | Paopand1/sql | 976d91872e0ab7af9412c19ebe31a74200bdddba | 1cad0fa703165922724e5871dab37fc1b476275b | refs/heads/master | 2021-01-17T17:49:08.422810 | 2016-06-12T13:43:42 | 2016-06-12T13:43:42 | 60,872,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import sqlite3
with sqlite3.connect("new.db") as connection:
c = connection.cursor()
c.execute("""CREATE TABLE regions
(city TEXT, region TEXT)
""")
# (again, copy and paste the values if you'd like)
cities = [
('New York City', 'Northeast'),
('San Francisco', 'West'),
('Chicago', 'Midwest'),
('Houston', 'South'),
('Phoenix', 'West'),
('Boston', 'Northeast'),
('Los Angeles', 'West'),
('Houston', 'South'),
('Philadelphia', 'Northeast'),
('San Antonio', 'South'),
('San Diego', 'West'),
('Dallas', 'South'),
('San Jose', 'West'),
('Jacksonville', 'South'),
('Indianapolis', 'Midwest'),
('Austin', 'South'),
('Detroit', 'Midwest')
]
c.executemany("INSERT INTO regions VALUES(?, ? )", cities)
c.execute("SELECT * \
FROM regions \
ORDER BY region ASC")
rows = c.fetchall()
for r in rows:
print r[0], r[1]
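    # Optional follow-up (illustrative sketch, not in the original script):
    # count cities per region with a GROUP BY over the same table.
    c.execute("SELECT region, COUNT(*) \
               FROM regions \
               GROUP BY region")
    for region, total in c.fetchall():
        print region, total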
| [
"[email protected]"
] | |
b81780f815a096f18a8bb8bc8a7f9835d836f10d | 03790bec5f643c9880e5a0f10583f3d29af9548a | /common/asserts.py | bcf1b47fca91bd3b7a867f30c574a3eb2b1d37b6 | [] | no_license | cidisk/backup | 6c6dfe3c6ef2a7c50909e2017777141e2c007b4d | 1e8ae84c5f3122cf3db3d091e84ca2faa7f59a4b | HEAD | 2016-09-09T20:04:12.925024 | 2015-01-08T13:01:48 | 2015-01-08T13:01:48 | 18,168,846 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 7,777 | py | # -*- coding: GB18030 -*-
'''
Created on Feb 20, 2014
@author: wangdongsheng<[email protected]>
'''
import os, sys
from common import checker
from common.loger import loger
import string
class AssertException(Exception):
pass
def assert_equal(lhs, rhs, errmsg=None):
'''
    @summary: assert that lhs equals rhs
    @param errmsg: message shown when the assertion fails
'''
if errmsg is None:
errmsg = "%s doesn't equal to: %s" % (lhs, rhs)
if lhs != rhs:
raise AssertException, errmsg
def assert_not_equal(lhs, rhs, errmsg=None):
'''
@summary: 断言lhs和rhs不想等
@param errmsg: 断言失败时显示的信息
'''
if errmsg is None:
errmsg = "%s equals to: %s" % (lhs, rhs)
if lhs == rhs:
raise AssertException, errmsg
def assert_gt(lhs, rhs, errmsg=None):
'''
    @summary: assert that lhs is greater than rhs
    @param errmsg: message shown when the assertion fails
'''
if errmsg is None:
errmsg = "%s is less than or equal to %s" % (lhs, rhs)
if lhs <= rhs:
raise AssertException, errmsg
def assert_bound(value, lb, hb, errmsg=None):
'''
    @summary: assert that value lies between lb and hb
    @param errmsg: message shown when the assertion fails
    @param lb: lower bound
    @param hb: upper bound
'''
if errmsg is None:
errmsg = "%s is not in [%s, %s]" % (value, lb, hb)
if value < lb or value > hb:
raise AssertException, errmsg
def assert_in_list(ele, lis, errmsg=None):
'''
    @summary: assert that ele is an element of lis
'''
if errmsg is None:
errmsg = "%s is not in %s" % (ele, lis)
if ele not in lis:
raise AssertException, errmsg
def assert_process_started(processpath, *ports):
'''
    @summary: assert that the process has started
    @param processpath: absolute path of the process
    @param ports: list of the process's port numbers
'''
if not checker.check_process_started(processpath, *ports):
ports_str = ",".join([str(p) for p in ports])
raise AssertException, "Process is not started: %s [%s]" % (processpath, ports_str)
def assert_process_not_started(processpath, *ports):
'''
    @summary: assert that the process has not started
    @param processpath: absolute path of the process
    @param ports: list of the process's port numbers
'''
if checker.check_process_started(processpath, *ports):
ports_str = ",".join([str(p) for p in ports])
raise AssertException, "Process is started: %s [%s]" % (processpath, ports_str)
def assert_path_not_contain(dirpath, filename, r=False):
'''
    @summary: assert that the path does not contain the file
    @param filename: may contain the wildcards * and ?
    @param r: whether to search recursively; defaults to False
'''
if checker.check_path_contain(dirpath, filename, r):
raise AssertException, "File '%s' in path: %s" % (filename, dirpath)
def assert_log_not_contain(log_reader, regex, ignore_list=[]):
'''
    @summary: assert that the log does not contain the regex string
    @param log_reader: a LogReader object
    @param regex: regex string
    @param ignore_list: lines matching these ignore regexes are skipped
'''
if checker.check_log_contain(log_reader, regex, ignore_list):
raise AssertException, "Regex '%s' in log '%s' from pos %d" % (regex, log_reader.logpath, log_reader.pos)
'''
Below is the scalar_equal implementation and its JSON serialization helpers.
'''
def comma(_str):
    if _str != "":
_str += ','
return _str
def key_name(name):
    if str(name) != "":
return '"'+str(name)+'"'+":"
return ""
def which2json(_str, name, item):
ret = 0
if type(item) == int or type(item) == float or type(item) == bool:
_str=numeral2json(_str, name, item)
elif type(item) == str:
_str=str2json(_str, name, item)
elif type(item) == list:
_str=list2json(_str, name, item)
elif type(item) == dict:
_str=dict2json(_str, [], item)
elif hasattr(item,'tojson'):
        if hasattr(item, 'needname') and item.needname() and name != "":
_str+='"'+name+'"'+":"
_str+=item.tojson()
else:
ret = -1
        loger.warning('unknown type ' + str(type(item)))
return _str,ret
def numeral2json(_str, name, value):
    if value != "":
#_str=comma(_str)
value=str(value)
if value == "False":
value='false'
if value == 'True':
value='true'
if name == '':
_str += value
else:
_str += key_name(name)+value
return _str
def str2json(_str, name, value):
if value <> "":
#_str=comma(_str)
_str += key_name(name)+'"'+str(value)+'"'
return _str
def list2json(_str, name, _list, isopt=False):
llen=len(_list)
if llen == 0 and isopt == True:
return _str
#_str=comma(_str)
ret = 0
tmp_str=""
for inx in range(0, llen):
item=_list[inx]
if inx > 0:
tmp_str=comma(tmp_str)
tmp_str,ret = which2json(tmp_str, '', item)
if ret == 0:
_str += key_name(name)+"["+tmp_str+"]"
return _str
def dict2json(_str, name, dict):
if len(dict) == 0:
return _str
itemname=[]
    if len(name) != 0:
itemname=name
else:
itemname=dict.keys()
tmp_str=""
ret = 0
for inx in range(0, len(itemname)):
name=itemname[inx]
if dict.has_key(name):
if dict[name] == '':
continue
tmp_str=comma(tmp_str)
tmp_str, ret=which2json(tmp_str, name, dict[name])
if ret == 0:
_str += '{'+tmp_str+'}'
return _str
def scalar_equal(src_scalar, dst_scalar, key_path_stack=[]):
"""
    @note: compares src_scalar to dst_scalar recursively,
           i.e. each item in src_scalar is checked against dst_scalar
    @param src_scalar: expected scalar
    @param dst_scalar: actual scalar
    @param key_path_stack: records the key path for error messages
"""
if type(src_scalar) != type(dst_scalar) and \
( type(src_scalar) not in (int, bool) and type(dst_scalar) not in (int,bool) ) :
key_path = string.join(key_path_stack, r'.') if len(key_path_stack) > 0 else ""
loger.error("src key %s type:[%s] is not equal dst type:[%s]" %(key_path, type(src_scalar), type(dst_scalar)))
return False
elif type(src_scalar) == dict:
for each_key in src_scalar.keys():
key_path_stack.append(str(each_key))
key_path = string.join(key_path_stack, r'.')
if not dst_scalar.has_key(each_key):
loger.error( "src key:%s does not exists in dst" % key_path)
return False
ret = scalar_equal(src_scalar[each_key], dst_scalar[each_key], key_path_stack)
key_path_stack.pop()
if not ret:
return False
elif type(src_scalar) == list:
key_path = string.join(key_path_stack, r'.') if len(key_path_stack) > 0 else ""
        if len(src_scalar) != len(dst_scalar):
            loger.error("src key:%s scalar list length %d is not equal dst %d" %(key_path, len(src_scalar), len(dst_scalar)))
            return False
for i in range(0, len(src_scalar)):
ret = scalar_equal(src_scalar[i], dst_scalar[i], key_path_stack)
if not ret:
loger.error("src key:%s[%d]->%s not equal dst value%s" %(key_path, i, src_scalar[i], dst_scalar[i]))
return False
elif type(src_scalar) in (int, float, long, bool, str, unicode):
key_path = string.join(key_path_stack, r'.') if len(key_path_stack) > 0 else ""
if src_scalar != dst_scalar:
loger.error("src key %s->%s is not equal dst value %s" %(key_path, src_scalar, dst_scalar))
return False
return True
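

# Usage sketch (illustrative, not part of the original module): the dicts
# below are hypothetical test data; scalar_equal logs the dotted key path of
# the first mismatch through common.loger and returns a bool.
if __name__ == '__main__':
    expected = {'name': 'bs', 'ports': [8000, 8001], 'meta': {'retry': 3}}
    actual = {'name': 'bs', 'ports': [8000, 8002], 'meta': {'retry': 3}}
    print scalar_equal(expected, actual, [])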
| [
"[email protected]"
] | |
d5689aaad3864c1d6af8cbaefddf6f80de9d4add | a88658ea37744a3f3c7f34f9df4689523e81f9e3 | /scripts/unittest/script/03-grayscale_to_rgb.py | 693f9b22edd90ab3e8c671a86a178c7f4bb421e5 | [
"MIT"
] | permissive | luuvt/openmv | ad7d559cf0a0ba3d1e3d1163ae0105c3d8efe29b | 1608a2a2f46220c0219abca1982dab80154fc21a | refs/heads/master | 2020-03-28T16:07:13.552279 | 2018-09-10T23:08:10 | 2018-09-10T23:08:10 | 148,661,405 | 1 | 0 | MIT | 2018-09-13T15:48:26 | 2018-09-13T15:48:25 | null | UTF-8 | Python | false | false | 155 | py | def unittest(data_path, temp_path):
import image
rgb = image.grayscale_to_rgb(182)
return (rgb[0] == 182 and rgb[1] == 182 and rgb[2] == 182)
| [
"[email protected]"
] | |
772ce7249aec7511d2d8a3e2e0ac39863bdcb480 | b96ed10d6247e22d4fa1d28bc3314bc319d3109c | /LessonSample/DjangoSample/ch06/exam_01.py | e1b350a860c6888c9645a01777fefae1a2354c9b | [] | no_license | 13555785106/PythonPPT-01 | ac1b22b9b1851f2b3ea6e4ab0a100e5f6896ee8c | 40e5883f248cb342f3a7fc7ad12ba02ebde4c619 | refs/heads/master | 2020-04-26T16:49:59.675964 | 2019-03-04T07:16:21 | 2019-03-04T07:16:21 | 157,095,747 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # -*- coding: utf-8 -*-
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoSample.settings")
django.setup()
from django.contrib.auth import authenticate
from django.contrib.auth.models import *
# user = User.objects.create_user(username='u001',password='abc')
# User.objects.filter(username='u001').delete()
user = authenticate(username='xiaozh', password='xjfdlj2010')
for up in user.user_permissions.all():
print up
if user:
print user
print user.has_perm('ch03.blog')
print user.has_module_perms('app100')
print '-----------------------'
user = User.objects.get(username='xiaojf')
if user.check_password('django1234'):
print user
print user.has_perm('ch03.blog')
print user.has_module_perms('app100')
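# Note (illustrative, not from the original sample): has_perm expects the
# '<app_label>.<codename>' form, so 'ch03.blog' assumes a custom permission
# with codename 'blog' declared on a model in the ch03 app, e.g.:
#     class Meta:
#         permissions = (('blog', 'Can manage blog'),)  # hypothetical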
| [
"[email protected]"
] | |
44ea730cf6616f7dc520d0befa3eb35596d36ff7 | 9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1 | /examples/fci/35-transition_density_matrix.py | 1dec4c94ae95abb94a55cc40801196ec362f3b78 | [
"Apache-2.0"
] | permissive | sunqm/pyscf | 566bc2447d8072cff442d143891c12e6414de01c | dd179a802f0a35e72d8522503172f16977c8d974 | refs/heads/master | 2023-08-15T18:09:58.195953 | 2023-03-27T21:02:03 | 2023-03-27T21:02:03 | 159,149,096 | 80 | 26 | Apache-2.0 | 2022-02-05T00:19:24 | 2018-11-26T10:10:23 | Python | UTF-8 | Python | false | false | 2,826 | py | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
Transition density matrix between singlet and triplet states
'''
import numpy as np
from pyscf import gto
from pyscf import fci
from pyscf.fci import cistring
# <T|i_alpha^+ j_beta|S>
def make_rdm1_t2s(bra, ket, norb, nelec_ket):
neleca, nelecb = nelec = nelec_ket
ades_index = cistring.gen_des_str_index(range(norb), neleca+1)
bdes_index = cistring.gen_des_str_index(range(norb), nelecb)
na_bra = cistring.num_strings(norb, neleca+1)
nb_bra = cistring.num_strings(norb, nelecb-1)
na_ket = cistring.num_strings(norb, neleca)
nb_ket = cistring.num_strings(norb, nelecb)
assert bra.shape == (na_bra, nb_bra)
assert ket.shape == (na_ket, nb_ket)
t1bra = np.zeros((na_ket,nb_bra,norb))
t1ket = np.zeros((na_ket,nb_bra,norb))
for str0, tab in enumerate(bdes_index):
for _, i, str1, sign in tab:
t1ket[:,str1,i] += sign * ket[:,str0]
for str0, tab in enumerate(ades_index):
for _, i, str1, sign in tab:
t1bra[str1,:,i] += sign * bra[str0,:]
dm1 = np.einsum('abp,abq->pq', t1bra, t1ket)
return dm1
# <S|i_beta^+ j_alpha|T>
def make_rdm1_s2t(bra, ket, norb, nelec_ket):
'''Inefficient version. A check for make_rdm1_t2s'''
neleca, nelecb = nelec = nelec_ket
ades_index = cistring.gen_des_str_index(range(norb), neleca)
bcre_index = cistring.gen_cre_str_index(range(norb), nelecb)
na_bra = cistring.num_strings(norb, neleca-1)
nb_bra = cistring.num_strings(norb, nelecb+1)
na_ket = cistring.num_strings(norb, neleca)
nb_ket = cistring.num_strings(norb, nelecb)
assert bra.shape == (na_bra, nb_bra)
assert ket.shape == (na_ket, nb_ket)
t1ket = np.zeros((na_bra,nb_ket,norb))
for str0, tab in enumerate(ades_index):
for _, i, str1, sign in tab:
t1ket[str1,:,i] += sign * ket[str0]
t1bra = np.zeros((na_bra,nb_bra,norb,norb))
for str0, tab in enumerate(bcre_index):
for a, _, str1, sign in tab:
t1bra[:,str1,a] += sign * t1ket[:,str0]
dm1 = np.einsum('ab,abpq->pq', bra, t1bra)
return dm1
if __name__ == '__main__':
mol = gto.M(
atom = '''
Be 0 0 0
H 0 -.9 .3
H 0 .9 .3
''',
basis = 'sto-3g'
)
mf = mol.RHF().run()
neleca, nelecb = mol.nelec
norb = mf.mo_coeff.shape[1]
np.set_printoptions(4, linewidth=150)
cisolver = fci.FCI(mf)
e_s, wfn_s = cisolver.kernel()
cisolver.spin = 2
e_t, wfn_t = cisolver.kernel()
print(f'Singlet state energy = {e_s}, Triplet state energy = {e_t}')
dm_st = make_rdm1_s2t(wfn_s, wfn_t, norb, (neleca+1, nelecb-1))
dm_ts = make_rdm1_t2s(wfn_t, wfn_s, norb, (neleca, nelecb))
print(abs(dm_st - dm_ts.T).max())
| [
"[email protected]"
] | |
0b2f7992f8b45d0cbfb11ef4e07be25834ca842a | 5c931367136a99ae809bdc8beefc8ff82c1053a3 | /OOP_python/train.py | 2fdb7d0bde05376e740ca4c262d00215915fcd4a | [] | no_license | TanyaDinesh00/FaceRecognitionAttendance | 7bd8696d310ff3d7c9fc3c48efecb88238728e8e | a030cc15cc88431c4eb62486d723c7df136e3c18 | refs/heads/master | 2021-04-20T15:58:36.702259 | 2020-03-24T13:21:51 | 2020-03-24T13:21:51 | 249,697,145 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,924 | py | import csv
import tkinter as tk
from tkinter import *
from tkinter import messagebox
import cv2
window = tk.Tk()
window.title("OOP Project")
window.geometry('800x500')
C = Canvas(window, bg="blue", height=250, width=300)
filename = PhotoImage(file=".\\Jagged2.png")
background_label = Label(window, image=filename)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
C.pack()
# window.attributes('-fullscreen', True)
window.grid_rowconfigure(0, weight=1)
window.grid_columnconfigure(0, weight=1)
message = tk.Label(window, text="Register Students:", bg="#d45a00", fg="black", width=14,
height=1, font=('Monospaced', 19, ''))
message.place(x=310, y=60)
lbl = tk.Label(window, text="Enter ID", width=8, height=1, fg="black", bg="#d45a00", font=('Monospaced', 15, ' '))
lbl.place(x=180, y=150)
txt = tk.Entry(window, width=20, bg="#d45a00", fg="black", font=('Monospaced', 15, ' '))
txt.place(x=400, y=150)
lbl2 = tk.Label(window, text="Enter Name", width=10, height=1, fg="black", bg="#d45a00", font=('Monospaced', 15, ' '))
lbl2.place(x=180, y=250)
txt2 = tk.Entry(window, width=20, bg="#d45a00", fg="black", font=('Monospaced', 15, ' '))
txt2.place(x=400, y=250)
lbl3 = tk.Label(window, text="Notification : ", width=10, fg="black", bg="#d45a00", height=1,
font=('Monospaced', 15, ' '))
lbl3.place(x=180, y=350)
message = tk.Label(window, text="", bg="#d45a00", fg="black", width=23, height=2, activebackground="blue",
font=('Monospaced', 15, ' '))
message.place(x=400, y=340)
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def TakeImages():
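    # Captures roughly 100 grayscale face crops from the webcam using the Haar
    # cascade, saves them under TrainingImage/, and appends (Id, name) to the
    # student CSV; requires a numeric Id and an alphabetic name.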
Id = (txt.get())
name = (txt2.get())
if is_number(Id) and name.isalpha():
cam = cv2.VideoCapture(0)
harcascadePath = "haarcascade_frontalface_default.xml"
detector = cv2.CascadeClassifier(harcascadePath)
sampleNum = 0
while (True):
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
sampleNum = sampleNum + 1
cv2.imwrite("TrainingImage\ " + name + "." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
cv2.imshow('frame', img)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
elif sampleNum > 100:
break
cam.release()
cv2.destroyAllWindows()
res = "Images Saved for ID : " + Id + " Name : " + name
row = [Id, name]
with open('StudentDetails\StudentDetails.csv', 'a+') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
csvFile.close()
message.configure(text=res)
else:
if (is_number(Id)):
res = "Enter Alphabetical Name"
message.configure(text=res)
if (name.isalpha()):
res = "Enter Numeric Id"
message.configure(text=res)
def quit():
dialog_title = 'QUIT'
dialog_text = 'Are you sure?'
answer = messagebox.askyesno(dialog_title, dialog_text)
if answer == TRUE:
window.destroy()
takeImg = tk.Button(window, text="Take Images", command=TakeImages, fg="black", bg="#b0e0e6", width=10, height=1,
activebackground="blue", font=('Monospaced', 13, ' '))
takeImg.place(x=200, y=420)
quitWindow = tk.Button(window, text="Quit", command=quit, fg="black", bg="#b0e0e6", width=10, height=1,
activebackground="blue", font=('Monospaced', 13, ' '))
quitWindow.place(x=450, y=420)
window.mainloop()
| [
"[email protected]"
] | |
1317e97cefcf2b877ccf8c52c3a3d26dfea585f6 | d016a5c398ebc57813f0108ada85c294ff0433b4 | /cart/migrations/0006_auto_20201218_1344.py | 239a5a39b2960b9c890135dc997a9645e64ae426 | [] | no_license | DelroyBrown28/GalleryWithCustomAdmin | c8dd4f0ca6c30665654ef056f906cc847f048938 | 09b40cbdc86bd71ec5377856bce9d06a763583b3 | refs/heads/main | 2023-04-13T05:44:59.860350 | 2021-04-29T13:40:36 | 2021-04-29T13:40:36 | 362,829,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.1.4 on 2020-12-18 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0005_auto_20201216_1624'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(upload_to='.'),
),
]
| [
"[email protected]"
] | |
f78a99da60f05521a62d7ab4bd26c2d4aad51c6d | a275600c0c95662ba191ed9290c8bb73e755b8d5 | /ReviewBotSpreadsheet.py | c6920341d8bb8dc1222b1b1431e40189dcadfffd | [] | no_license | ALProjects/UofTReviewBot | 449474d395a30cc45f1ae975ef66e332759794a5 | 5d24155e784b3974b178ee2469f6f8e03211afbc | refs/heads/master | 2021-01-11T19:53:40.633744 | 2017-10-18T17:48:15 | 2017-10-18T17:48:15 | 79,419,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | import gspread
from oauth2client.service_account import ServiceAccountCredentials
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open("UofTFoodBotData").sheet1
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()
def find_restaurant(requestedname):
i = 0
list_of_reviews = []
review_string = ""
end_result = ""
while i < len(list_of_hashes):
curr_restaurant = list_of_hashes[i]
restaurant_name = curr_restaurant['Name']
if requestedname.lower() == restaurant_name.lower():
print("restaurant found")
review_string = "Name: " + list_of_hashes[i]['Name'] + "\n \n Price: " + list_of_hashes[i]['Price'] + "\n \n Rating: " + list_of_hashes[i]['Rating'] + "\n \n Comment: " + list_of_hashes[i]['Comments'] + "\n \n Username: " + list_of_hashes[i]['Reddit Username']
list_of_reviews.append(review_string)
i += 1
for review in list_of_reviews:
end_result += review + "\n \n" + "-----" + "\n \n"
return end_result
def list_restaurants():
my_list = []
i = 0
while i < len(list_of_hashes):
curr_restaurant = list_of_hashes[i]
my_list.append(curr_restaurant['Name'])
i += 1
return my_list
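# Minimal usage sketch (illustrative; assumes the worksheet columns used
# above exist and 'Sushi Place' is a hypothetical restaurant name):
if __name__ == '__main__':
    print(list_restaurants())
    print(find_restaurant('Sushi Place'))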
| [
"[email protected]"
] | |
c13c436e4d7f18b5c491d3dd6c011384b7749b93 | 85c82274a3888fa61795bb0600ab96eaf7665b6a | /meet9_22_April_2021/D_BSTPostOrder.py | 4bc9fc77678fc38762f711d242a06f7f8c3333f1 | [] | no_license | refeed/StrukturDataA | 8e5a214569f41b19c05842d003ede5941800482a | 4d3b77bbd28158f1f1e64a49b8e90da731859407 | refs/heads/master | 2023-06-03T08:22:12.442536 | 2021-07-01T03:24:29 | 2021-07-01T03:24:29 | 360,478,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | '''
Soal 1
Batas Run-time: 1 detik / test-case
Batas Memori: 32 MB
DESKRIPSI SOAL
Diberikan n buah bilangan bulat, a1, a2, ..., an. Kemudian dibentuk binary
search tree dari barisan tersebut. Tugas Anda ialah mengoutputkan secara
postorder.
PETUNJUK MASUKAN
Input terdiri atas 2 baris. Baris pertama berisi sebuah bilangan bulat positif n
yang menyatakan banyak bilangan. Baris kedua berisi n buah bilangan bulat, yang
menyatakan a1, a2, ..., an
PETUNJUK KELUARAN
Outputkan secara preorder.
CONTOH MASUKAN
9
3 0 8 2 4 9 1 5 6
CONTOH KELUARAN 1
3
0
2
1
8
4
5
6
9
KETERANGAN
Pada contoh testcase, binary search tree yang terbentuk adalah sebagai berikut.
'''
class Node:
def __init__(self, number=None):
self.number = number
self.left = None # type: BST
self.right = None # type: BST
class BST:
def __init__(self):
self.root = None # type: Node
def insert(self, number):
if self.root is None:
self.root = Node(number)
self.root.left = BST()
self.root.right = BST()
elif number > self.root.number:
self.root.right.insert(number)
else:
self.root.left.insert(number)
def get_height(self):
if self.root is None:
return 0
return (
1 + max(self.root.right.get_height(), self.root.left.get_height()))
def get_leaf_num(self):
if self.root is None:
return 0
elif self.root.left.root is None and self.root.right.root is None:
return (1 +
self.root.left.get_leaf_num() + self.root.right.get_leaf_num())
return (self.root.left.get_leaf_num() + self.root.right.get_leaf_num())
def postorder_print(self):
if self.root is None:
return
self.root.left.postorder_print()
self.root.right.postorder_print()
print(self.root.number)
def preorder_print(self):
if self.root is None:
return
print(self.root.number)
self.root.left.preorder_print()
self.root.right.preorder_print()
if __name__ == '__main__':
input() # Ignore the first input
input_to_tree_list = list(map(int, input().split()))
bst = BST()
for num in input_to_tree_list:
bst.insert(num)
bst.postorder_print()
| [
"[email protected]"
] | |
ef82d4c4ffde8688092fa0cea993d71cf5e89ca5 | 6f90ed39c673c0553b4173ca1c8dd17fa5994ccf | /ELECTRONIC_STATION/FindSequence.py | d95575bb0045350c087da09e67389e2fec323894 | [
"MIT"
] | permissive | jemg2030/Retos-Python-CheckIO | 390f9a846c79f0ab7d94641d16ab8bfa23fb33fb | ce52aa1fd718d5cabdd4592469148b9b52483ced | refs/heads/master | 2023-08-31T17:45:23.216699 | 2023-08-29T23:33:40 | 2023-08-29T23:33:40 | 144,081,187 | 0 | 0 | MIT | 2023-03-05T05:38:43 | 2018-08-09T00:21:39 | Python | UTF-8 | Python | false | false | 9,541 | py | '''
“There’s nothing here...” sighed Nikola.
“You’re kidding right? All treasure is buried treasure! It wouldn’t be treasure otherwise!” Said
Sofia. “Here, take these.” She produced three shovels from a backpack that seemed to appear out of thin air.
“Where did you get-”
“Don’t ask questions. Just dig!” She hopped on the shovel and began digging furiously.
CLUNK
“Hey we hit something.” Stephen exclaimed in surprise.
“It’s the treasure!” Sofia was jumping up and down in excitement.
The trio dug around the treasure chest and pulled it out of the hole and wiped the dirt off. Sofia
tried grabbing the lid but it was locked. Nikola studied the locking mechanism.
“I’ve seen this type of lock before. It’s pretty simple. We just need to check whether there is
a sequence of 4 or more matching numbers and output a bool.”
“Easy enough. Let’s open this sucker up!” Sofia was shaking in excitement.
You are given a matrix of NxN (4≤N≤10). You should check if there is a sequence of 4 or more matching digits.
The sequence may be positioned horizontally, vertically or diagonally (NW-SE or NE-SW diagonals).
find-sequence
Input: A matrix as list of lists with integers.
Output: Whether or not a sequence exists as a boolean.
Examples:
assert checkio([[1, 2, 1, 1], [1, 1, 4, 1], [1, 3, 1, 6], [1, 7, 2, 5]]) == True
assert checkio([[7, 1, 4, 1], [1, 2, 5, 2], [3, 4, 1, 3], [1, 1, 8, 1]]) == False
assert (
checkio(
[
[2, 1, 1, 6, 1],
[1, 3, 2, 1, 1],
[4, 1, 1, 3, 1],
[5, 5, 5, 5, 5],
[1, 1, 3, 1, 1],
]
)
== True
)
assert (
checkio(
[
[7, 1, 1, 8, 1, 1],
[1, 1, 7, 3, 1, 5],
[2, 3, 1, 2, 5, 1],
[1, 1, 1, 5, 1, 4],
[4, 6, 5, 1, 3, 1],
[1, 1, 9, 1, 2, 1],
]
)
== True
)
How it is used: This concept is useful for games where you need to detect various lines of the same
elements (match 3 games for example). This algorithm can be used for basic pattern recognition.
Preconditions:
0 ≤ len(matrix) ≤ 10;
all(all(0 < x < 10 for x in row) for row in matrix).
'''
def checkio(matrix: list[list[int]]) -> bool:
n = len(matrix)
for i in range(n):
for j in range(n):
# check horizontal sequence
if j + 3 < n and matrix[i][j] == matrix[i][j + 1] == matrix[i][j + 2] == matrix[i][j + 3]:
return True
# check vertical sequence
if i + 3 < n and matrix[i][j] == matrix[i + 1][j] == matrix[i + 2][j] == matrix[i + 3][j]:
return True
# check diagonal (NW-SE) sequence
if i + 3 < n and j + 3 < n and matrix[i][j] == matrix[i + 1][j + 1] == matrix[i + 2][j + 2] == \
matrix[i + 3][j + 3]:
return True
# check diagonal (NE-SW) sequence
if i + 3 < n and j - 3 >= 0 and matrix[i][j] == matrix[i + 1][j - 1] == matrix[i + 2][j - 2] == \
matrix[i + 3][j - 3]:
return True
return False
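

# Equivalent formulation (illustrative sketch, not part of the original
# solution): the four explicit checks above collapse into one loop over
# direction vectors (right, down, SE diagonal, SW diagonal).
def checkio_with_directions(matrix: list[list[int]]) -> bool:
    n = len(matrix)
    for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(n):
            for j in range(n):
                # The four cells reached by stepping along this direction.
                cells = [(i + k * dx, j + k * dy) for k in range(4)]
                if all(0 <= x < n and 0 <= y < n for x, y in cells):
                    if len({matrix[x][y] for x, y in cells}) == 1:
                        return True
    return False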
print("Example:")
print(checkio([[1, 2, 1, 1], [1, 1, 4, 1], [1, 3, 1, 6], [1, 7, 2, 5]]))
# These "asserts" are used for self-checking
assert checkio([[1, 2, 1, 1], [1, 1, 4, 1], [1, 3, 1, 6], [1, 7, 2, 5]]) == True
assert checkio([[7, 1, 4, 1], [1, 2, 5, 2], [3, 4, 1, 3], [1, 1, 8, 1]]) == False
assert (
checkio(
[
[2, 1, 1, 6, 1],
[1, 3, 2, 1, 1],
[4, 1, 1, 3, 1],
[5, 5, 5, 5, 5],
[1, 1, 3, 1, 1],
]
)
== True
)
assert (
checkio(
[
[7, 1, 1, 8, 1, 1],
[1, 1, 7, 3, 1, 5],
[2, 3, 1, 2, 5, 1],
[1, 1, 1, 5, 1, 4],
[4, 6, 5, 1, 3, 1],
[1, 1, 9, 1, 2, 1],
]
)
== True
)
assert (
checkio(
[
[2, 6, 2, 2, 7, 6, 5],
[3, 4, 8, 7, 7, 3, 6],
[6, 7, 3, 1, 2, 4, 1],
[2, 5, 7, 6, 3, 2, 2],
[3, 4, 3, 2, 7, 5, 6],
[8, 4, 6, 5, 2, 9, 7],
[5, 8, 3, 1, 3, 7, 8],
]
)
== False
)
assert (
checkio(
[
[1, 7, 6, 1, 8, 5, 1],
[7, 9, 1, 7, 2, 8, 6],
[5, 1, 4, 5, 8, 8, 3],
[8, 6, 3, 9, 7, 6, 9],
[9, 8, 9, 8, 6, 8, 2],
[1, 7, 2, 4, 9, 3, 8],
[9, 9, 8, 6, 9, 2, 6],
]
)
== False
)
assert (
checkio(
[
[6, 9, 1, 1, 6, 2],
[5, 9, 7, 8, 2, 5],
[2, 1, 1, 7, 9, 8],
[1, 8, 1, 4, 7, 4],
[7, 8, 5, 4, 5, 1],
[6, 4, 8, 8, 1, 8],
]
)
== False
)
assert (
checkio(
[
[2, 7, 6, 2, 1, 5, 2, 8, 4, 4],
[8, 7, 5, 8, 9, 2, 8, 9, 5, 5],
[5, 7, 7, 7, 4, 1, 1, 2, 6, 8],
[4, 6, 6, 3, 2, 7, 6, 6, 5, 1],
[2, 6, 6, 9, 8, 5, 5, 6, 7, 7],
[9, 4, 1, 9, 1, 3, 7, 2, 3, 1],
[5, 1, 4, 3, 6, 5, 9, 3, 4, 1],
[6, 5, 5, 1, 7, 7, 8, 2, 1, 1],
[9, 5, 7, 8, 2, 9, 2, 6, 9, 3],
[8, 2, 5, 7, 3, 7, 3, 8, 6, 2],
]
)
== False
)
assert (
checkio(
[
[1, 9, 7, 8, 9, 3, 6, 5, 6, 2],
[4, 9, 4, 8, 3, 4, 8, 8, 5, 9],
[2, 8, 5, 5, 7, 8, 6, 1, 3, 6],
[6, 4, 7, 6, 9, 1, 4, 5, 7, 8],
[4, 7, 7, 9, 8, 8, 8, 8, 4, 4],
[3, 7, 3, 2, 1, 9, 1, 8, 9, 1],
[4, 7, 2, 4, 8, 1, 2, 3, 6, 2],
[4, 4, 1, 3, 3, 3, 9, 2, 6, 7],
[8, 6, 1, 9, 3, 5, 8, 1, 7, 5],
[7, 3, 6, 5, 3, 6, 6, 4, 8, 2],
]
)
== True
)
assert checkio([[1, 6, 1, 7], [4, 7, 3, 6], [3, 5, 7, 9], [8, 6, 6, 9]]) == False
assert (
checkio(
[
[1, 2, 4, 6, 3],
[2, 5, 2, 6, 3],
[8, 7, 5, 9, 5],
[2, 1, 1, 4, 3],
[4, 2, 7, 5, 1],
]
)
== False
)
assert (
checkio(
[
[2, 3, 6, 5, 6, 2, 8, 3, 7, 4],
[6, 9, 5, 9, 7, 6, 8, 5, 1, 6],
[6, 8, 2, 6, 1, 9, 3, 6, 6, 4],
[5, 8, 3, 2, 3, 8, 7, 4, 6, 4],
[2, 3, 1, 4, 5, 1, 2, 5, 6, 9],
[5, 4, 8, 7, 5, 5, 8, 4, 9, 5],
[9, 7, 9, 9, 5, 9, 9, 8, 1, 2],
[5, 1, 7, 4, 8, 3, 4, 1, 8, 8],
[5, 3, 3, 2, 6, 1, 4, 3, 8, 8],
[4, 8, 1, 4, 5, 8, 8, 7, 4, 7],
]
)
== True
)
assert (
checkio(
[
[7, 7, 4, 4, 8],
[7, 4, 5, 5, 6],
[6, 6, 5, 2, 8],
[6, 2, 3, 8, 4],
[6, 1, 3, 1, 2],
]
)
== False
)
assert (
checkio(
[
[7, 9, 1, 7, 6, 7, 5, 9, 6],
[5, 5, 9, 3, 1, 6, 7, 4, 7],
[1, 7, 5, 2, 3, 1, 6, 4, 7],
[2, 2, 2, 8, 7, 2, 6, 6, 9],
[5, 6, 4, 2, 6, 7, 3, 4, 7],
[5, 5, 6, 4, 9, 4, 3, 1, 7],
[7, 3, 2, 3, 2, 4, 4, 7, 3],
[3, 6, 9, 7, 2, 5, 6, 2, 5],
[4, 1, 3, 9, 4, 2, 4, 8, 4],
]
)
== True
)
print("The mission is done! Click 'Check Solution' to earn rewards!")
| [
"[email protected]"
] | |
324ad24308b072d1d069e4afa0d0f06ec7efcb81 | 6f0517136962ad238963a597bc123994b4ac7201 | /backend/settings.py | 8a08765d2465bdb9080af2971b5d001cb939f3dd | [] | no_license | Astig-1982/proshop-2-backend | 8862869cfc4c93a645bcf04c8e5ac30c49db9682 | caa0e25e90072d358e44e6a92bd87b9709a9c69c | refs/heads/master | 2023-07-11T17:17:06.794880 | 2021-08-16T19:00:13 | 2021-08-16T19:00:13 | 394,761,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,647 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-royc29fs572uhks+3!rh_^8no@+7c3@g)qacpt2v-o9no*f_o&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'base',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', # this middleware is from cors header, to enable the API
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.sqlite3', # this is the database we use for the project
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/' # also set up in urls.py; this is the URL prefix under which uploaded images are served
# the below is added by me (Astig) in order to point to the root directory, and connected to static folder
# STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),) ----- this is the same thing as the below, and I have used it in my Solid-Properties project
STATICFILES_DIRS = [
BASE_DIR / 'static'
]
# MEDIA_ROOT = os.path.join(BASE_DIR, 'media') ---- this has been used in Solid-Properties
MEDIA_ROOT = 'static/images' # this tells Django to look in the 'static' folder and then in the 'images' folder, so images uploaded from the Django admin go directly into the 'static/images' folder
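# Illustrative sketch (assumption, not part of this project): with the
# MEDIA_ROOT above, a model field like the following would store uploads
# under static/images/ and serve them at /images/<filename>:
#     class Product(models.Model):          # hypothetical model
#         image = models.ImageField(upload_to='')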
CORS_ALLOW_ALL_ORIGINS = True # this enables any origin to make requests to the API
# Below is the code to allow only certain origins to make requests to the API:
# this is taken from 'cors headers' documentation
#CORS_ALLOWED_ORIGINS = [
# "https://example.com",
# "https://sub.example.com",
# "http://localhost:8080",
# "http://127.0.0.1:9000"
#] basically only the origins above would be able to make requests to the API
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
597a43642a545ace37f7aad5480f7d6e091eb42e | 027f32756152679568fd564bb10499a75c5b7b79 | /tarea2/resta.py | 965c9f43910a7108099379c8293bf5dac121404a | [] | no_license | jorszs/C-S | 1172c66f23b76178244291cf20e075207fdfc111 | a203db1a86bb0f311811d8a4ddc321411b889685 | refs/heads/master | 2021-01-01T08:55:10.334855 | 2020-04-28T05:56:07 | 2020-04-28T05:56:07 | 239,205,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import zmq
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:8002")
#message = socket.recv()
message_str = message.decode('utf-8')
l = message_str.split(",")
#print (type(l))
respuesta = int(l[1]) - int(l[2])
print (respuesta)
respuesta = str(respuesta)
socket.send_string(respuesta) | [
"[email protected]"
] | |
d3463fba4186d6fce95381fe6cfbbe8a5d410588 | c2f508483835e652148899044a360f96fcd98633 | /AirBnb_Price/__init__.py | 0b101f128a3f05b67deffb05d585427b912bd837 | [] | no_license | elfranco91/AirBnb_Price | 57c0b782b34697b4d2510a3cab5dd5f06b5fb418 | 72849360dc3a12a34b9a81bf3ebaf034b97f6b1e | refs/heads/master | 2022-09-02T07:55:07.202331 | 2020-05-23T12:39:31 | 2020-05-23T12:39:31 | 266,333,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | __author__ = 'Al_Cummins'
| [
"[email protected]"
] | |
c2126d552a9bc32f04d2c9ab7ca9429208c3ecf5 | edfc90cafb2f815c3d37a572b2e38dcb936fa35b | /windowKeyPress/xdisplay.py | b9323a894a106d7246548ac9c8c5d690102db618 | [] | no_license | kshen91/pyScripts | 5f28c3bee5651b584c827dfdc266b5a2ab745282 | e9fd3f4fb589b8fd378f6cf6bdbd1f968a459805 | refs/heads/master | 2020-04-22T13:36:54.672744 | 2020-01-11T06:08:47 | 2020-01-11T06:08:47 | 170,415,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,816 | py | # -*- coding: utf-8 -*-
# Author: [email protected]
from copy import copy
from Xlib import X, display
from Xlib import __version__ as XlibVer
from Xlib.ext.record import AllClients
from Xlib.protocol.rq import EventField
from .xwindow import XWindow
from .logconfig import ConfigureLog
log = ConfigureLog(__name__)
if XlibVer < (0, 25):
log.warning("Xlib version is older than 0.25, suggest to update Xlib to make sure proper behaivor")
class XDisplay(object):
def __init__(self, disp=None):
self._display = display.Display(disp)
self._display2 = display.Display(disp)
self._root = self._display.screen().root
self._nrOfWindow = 0
self._winNameDict = {} # key:(winClass, winName),value:[XWindow]
self._winIdDict = {} # key:winId, value:[XWindow]
self._winList = []
self._UpdateViewableWindowHierarchy(self._root)
def _UpdateViewableWindowHierarchy(self, window, bOnlyExecuteActive=True, bPrintWindows=False, winHierarchyList=[]):
# reset counter and dict if check from root
if window == self._root:
self._nrOfWindow = 0
self._winNameDict = {}
self._winIdDict = {}
self._winList = []
else:
winHierarchyList = copy(winHierarchyList)
winHierarchyList.append(window)
winName = window.get_wm_name()
winClass = None if window.get_wm_class() is None else window.get_wm_class()[0]
if winClass is not None and winName is not None:
self._nrOfWindow += 1
rootWinOfApp = winHierarchyList[0]
winObj = XWindow(self._display, rootWinOfApp, window)
# update idDict
self._winIdDict[rootWinOfApp.id] = winObj
# update nameDict
nameKey = (winClass, winName) # it's possble to have same values for this key
if nameKey in self._winNameDict.keys():
self._winNameDict[nameKey].append(winObj)
else:
self._winNameDict[nameKey] = [winObj]
# update winList
self._winList.append([rootWinOfApp.id, nameKey])
if bPrintWindows:
print("%d - winId:[%d], winClass:[%s], winName:[%s], xy=%r, size=%r" % (self._nrOfWindow, rootWinOfApp.id, winClass, winName, winObj.GetXY(), winObj.GetSize()))
return
# else try to find name and class in the children
children = window.query_tree().children
for w in children:
if (not bOnlyExecuteActive) or (w.get_attributes().map_state == X.IsViewable):
geo = w.get_geometry()
if geo.x >= 0 and geo.y >= 0:
self._UpdateViewableWindowHierarchy(w, bOnlyExecuteActive, bPrintWindows, winHierarchyList)
def ListActiveWindows(self):
print("Listing all active windows, do not minimize a window if you would like it to be listed here:")
self._UpdateViewableWindowHierarchy(self._root, True, True)
def GetWindowList(self):
return self._winList
def ListAllWindows(self):
print("Listing all windows include both actived and inactived ones:")
self._UpdateViewableWindowHierarchy(self._root, False, True)
def GetWindowByClassAndName(self, winClass, winName):
inquiryKey = (winClass, winName)
if inquiryKey in self._winNameDict.keys():
if len(self._winNameDict[inquiryKey]) > 1:
log.warning("More than one window has winClass:<%s> and winName:<%s>, please try to make it unique by minimizing or closing same windows, and try again. \
Or try to use GetWindowById() function", winClass, winName)
return None
return self._winNameDict[inquiryKey][0]
log.warning("window with winClass:<%s> and winName:<%s> can not be found", winClass, winName)
return None
def GetWindowById(self, winId):
if winId in self._winIdDict.keys():
return self._winIdDict[winId]
log.warning("window with winId:<%s> can not be found", winId)
return None
def _WaitClick(self, offset):
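        # Uses the X RECORD extension: this display connection streams raw
        # device events while self._display2 stays free to disable the
        # context; the first ButtonPress yields the click's root coordinates
        # minus the given window offset.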
(offsetX, offsetY) = offset
ctx = self._display.record_create_context(
0,
[AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.MotionNotify),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}]
)
holder = {'X': None, 'Y': None}
def OnEvent(r, holder):
data = r.data
while len(data):
event, data = EventField(None).parse_binary_value(data, self._display.display, None, None)
if event.type == X.ButtonPress:
holder['X'] = event.root_x - offsetX
holder['Y'] = event.root_y - offsetY
self._display2.record_disable_context(ctx)
self._display2.flush()
try:
self._display.record_enable_context(ctx, lambda r: OnEvent(r, holder))
finally:
self._display.record_free_context(ctx)
return holder['X'], holder['Y']
def GetClickedXY(self, baseWin=None):
print('Waiting for click')
offset = (0, 0) if baseWin is None else baseWin.GetXY()
x, y = self._WaitClick(offset)
if baseWin is None:
print('Clicked at (%d,%d)' % (x, y))
else:
print('Clicked at (%d,%d) in window:%s' % (x, y, baseWin.GetName()))
return (x, y)
| [
"[email protected]"
] | |
38797c10553c5cd6815abf1b84e21bbdf83c0be8 | b2b9cd537c4c6a216d9b1ee32008cc8e98552405 | /Comprehensive_practice/金融/Tushare.py | 5c0b234909a129dce381f835e310a44257a731cd | [] | no_license | liyanfeng0127/python2_bdrw | ce982813645294b884d73cd2bbc4de5a33fa2cd5 | 52eba0d67d30ed5ce23e01dde69db35a8ed65787 | refs/heads/master | 2021-05-08T06:50:53.916210 | 2017-10-12T10:41:05 | 2017-10-12T10:41:05 | 106,676,637 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | import tushare as ts
from sqlalchemy import create_engine
engine = create_engine("mysql://root:1234@localhost/scraping?charset=utf8")
print ts.get_stock_basics()
df1 = ts.get_stock_basics()
df1.to_sql('news_data',engine,if_exists='append')
print df1
print ts.get_tick_data('600019',date = '2016-12-16')
df2 = ts.get_tick_data('600019',date = '2016-12-16')
print df2
print ts.get_latest_news()
df3 = ts.get_latest_news()
df3.to_sql('news_data',engine,if_exists='append')
print df3
| [
"[email protected]"
] | |
178549f90a46caef4444354bee8c1e88ed2a48cb | 1c8fbee29a3e7596a0c7bd0b73396801c8f22965 | /day11.py | edffadb38cdd0c1ee39b64eda5f9f9c095ebe4cd | [] | no_license | yschua/aoc2019 | 9c9b8e913a0195b819d6ba284d81b69605c8eb72 | dd2d13b7443e49a1d820e3585d6a452bd3b5bf5f | refs/heads/master | 2020-09-26T19:19:07.392045 | 2019-12-24T14:45:20 | 2019-12-24T14:45:20 | 226,322,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | import sys
class IntcodeComputer:
def __init__(self, program):
self.m = dict(zip(range(len(program)), program))
self.i = 0
self.end = False
self.output = []
self.relBase = 0
# self.run(inp)
def idx(self, n):
mode = self.paramModes[-n] if n <= len(self.paramModes) else 0
if mode == 0: return self.m[self.i + n] # position
elif mode == 1: return self.i + n # immediate
elif mode == 2: return self.m[self.i + n] + self.relBase # relative
def set(self, n, val):
self.m[self.idx(n)] = val
def get(self, n):
idx = self.idx(n)
return self.m[idx] if idx in self.m else 0
def setParamModes(self):
self.paramModes = [int(x) for x in str(self.m[self.i] // 100)]
def run(self, inp):
self.output = []
while True:
op = self.m[self.i] % 100
self.setParamModes()
if op == 99:
# terminate
self.end = True
break
elif op == 1:
# add
self.set(3, self.get(1) + self.get(2))
self.i += 4
elif op == 2:
# multiply
self.set(3, self.get(1) * self.get(2))
self.i += 4
elif op == 3:
# input
if inp is None: break
self.set(1, inp)
inp = None
self.i += 2
elif op == 4:
# output
self.output.append(self.get(1))
self.i += 2
elif op == 5:
# jump if true
self.i = self.get(2) if self.get(1) != 0 else self.i + 3
elif op == 6:
# jump if false
self.i = self.get(2) if self.get(1) == 0 else self.i + 3
elif op == 7:
# less than
self.set(3, int(self.get(1) < self.get(2)))
self.i += 4
elif op == 8:
# equals
self.set(3, int(self.get(1) == self.get(2)))
self.i += 4
elif op == 9:
# relative base offset
self.relBase += self.get(1)
self.i += 2
program = [int(x) for x in sys.stdin.readline().split(',')]
# 0 if black, 1 if white
# output 0 to black, 1 to white
# output 0 to left, 1 to right, move forward
grid = {}
pos = (0, 0)
dir = 'up'
robot = IntcodeComputer(program)
color = 1
def move(pos, dir, turnRight):
if dir == 'up':
return (pos[0] + (1 if turnRight else -1), pos[1]), 'right' if turnRight else 'left'
elif dir == 'down':
return (pos[0] + (-1 if turnRight else 1), pos[1]), 'left' if turnRight else 'right'
elif dir == 'right':
return (pos[0], pos[1] + (-1 if turnRight else 1)), 'down' if turnRight else 'up'
else:
return (pos[0], pos[1] + (1 if turnRight else -1)), 'up' if turnRight else 'down'
def getColor(pos):
return 0 if pos not in grid else grid[pos]
while not robot.end:
robot.run(color)
if len(robot.output) > 0:
paint, turnRight = robot.output
grid[pos] = paint
pos, dir = move(pos, dir, turnRight)
color = getColor(pos)
print(len(grid))
xmin = min(grid.keys(), key=lambda x: x[0])[0]
xmax = max(grid.keys(), key=lambda x: x[0])[0]
ymin = min(grid.keys(), key=lambda x: x[1])[1]
ymax = max(grid.keys(), key=lambda x: x[1])[1]
for y in range(ymax, ymin - 1, -1):
for x in range(xmin, xmax + 1):
print('#' if getColor((x,y)) else ' ', end='')
print()
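# Usage note: the Intcode program is read as one comma-separated line from
# stdin, so an Advent of Code 2019 day 11 input file can be piped in, e.g.
#   python day11.py < input.txt
# With color = 1 the robot starts on a white panel (the part-two setup);
# len(grid) reports how many panels were painted at least once, and the
# final dump renders the painted registration identifier.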
| [
"[email protected]"
] | |
2640a496fbbbfc84cdd75e34d8405c76172e6374 | 12beef17dc4820db0cb15858db316654ff3d847e | /src/models/clientes/views.py | b92451274f073278291d1d89ef9ccedb0a6aa871 | [] | no_license | arlincdt/proyectofinal-eyap | 356ee5969d908d38e5575ee950daa9b25ded9048 | df6115c5ec0f0501313e6366b7f331b66e58329b | refs/heads/master | 2020-03-12T17:54:18.240682 | 2018-04-23T23:03:40 | 2018-04-23T23:03:40 | 130,747,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | from flask import Blueprint, request, session, url_for, render_template
from werkzeug.utils import redirect
from src.models.clientes.cliente import Cliente
from src.models.promotores.promotor import Promotor
from src.models.fechas.fecha import Fecha
from src.models.inversiones.inversion import Inversion
from src.models.fondo import Fondo
cliente_blueprint = Blueprint('clientes', __name__)
@cliente_blueprint.route('/muestra_clientes')
def getClientes():
fecha = Fecha.getFecha()
promotor = Promotor.getByNombre(session['nombre'])
ganancias = Fondo.getFondo().ganancias
if promotor.isAdmin:
clientes = Cliente.getAllClientes()
else:
clientes = Cliente.getClientesByPromotor(promotor.nombre)
valoresActuales = []
if clientes:
for cliente in clientes:
suma = 0
invs = Inversion.getByCId(cliente.cId)
if invs:
for inv in invs:
monto = inv.getMontoActual()
suma = suma + monto
valoresActuales.append(suma)
return render_template('clientes.jinja2', fecha=fecha, promotor=promotor, clientes=clientes, valoresActuales = valoresActuales, ganancias = ganancias)
@cliente_blueprint.route('/nuevoCliente', methods=['GET', 'POST'])
def nuevoCliente():
if request.method == 'POST':
promotor = Promotor.getByNombre(session['nombre'])
nombre = request.form['nombre']
saldoInicial = request.form['saldoInicial']
cliente = Cliente(1, nombre, saldoInicial, 0, promotor.nombre)
cliente.saveToDb()
return redirect(url_for('clientes.getClientes'))
fecha = Fecha.getFecha()
return render_template("nuevo_cliente.jinja2", fecha=fecha)
| [
"[email protected]"
] | |
4d45d2e69c0f3a32153cb25061b933bb185d2166 | 53c48bda124090ad855e62d5ad58e70ac08aba97 | /manage.py | faf7daab8b7c89c6ca9eee045f96269809429d14 | [
"BSD-2-Clause-Views"
] | permissive | ivanalejandro0/django-products-crud | 290369b3e7b89e71457447cc4d55904639dad3b8 | cce0bc5a5048bacf2ea5d5d83fdf01e5a1eba3ab | refs/heads/master | 2021-01-19T11:22:14.298832 | 2013-04-01T22:59:58 | 2013-04-01T22:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_products_crud.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
146d54b62a28cf1b24b9aa9c7bab66ab4e33d32a | 3e4fec214d82b5547df9ff94e163beb9369547f4 | /src/analog_in.py | a694d25c0f58a5fe6cbfed47d7abc6ea4be19130 | [] | no_license | physikier/magnetometer | 582caf47d3869340cd18a0aaa2d171e37591c79a | e9e6bd314d48b4b40ac5a2145b821305dec2d05e | refs/heads/master | 2021-01-21T14:44:11.460815 | 2016-04-30T21:15:02 | 2016-04-30T21:15:02 | 59,505,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import hardware.nidaq
import numpy as np
import matplotlib.pylab as plt
freq=100e3
N=10000
ai_measurement = hardware.nidaq.AITask('/Dev2/ai0', N, freq, read_timeout=1.0, range=(-10, 10))
ai_measurement.Start()
x=np.linspace(0.,1./freq*N,N)
y1,y2,y3 = ai_measurement.Read()
import matplotlib.pyplot as pyplot
plt.plot(x,y1)
#plt.plot(x,y2)
plt.show()
ai_measurement.Stop()
| [
"[email protected]"
] | |
e460a548878e52c16c7ce3e5951320f921419a3f | ad6133a14250c895b8222c894e1048681150e15a | /venv/bin/sqlformat | 69b388d630a24f98018be87c05b3dafbb441863b | [] | no_license | hangockhue/healthybackend | aba1aace4b9ebb2bb126bb89ab85c83fe0d363d0 | dd7ad4114f3f6c3f82c1ae249c5a798646a8bb3e | refs/heads/main | 2023-07-04T04:54:41.250502 | 2021-08-15T04:03:00 | 2021-08-15T04:03:00 | 396,208,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/hangockhue/Desktop/Project/healthybackend/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
2b366ea58327bfd99677e527493481876123fc92 | 7b61f50bde439f21057a9ef81f6d79b5142d12b7 | /rename.py | e8a3e05781ce91a74745b04557b9989a22ca7c52 | [] | no_license | BlakeERichey/CNN | 2cc95828d8936c49af57c738b2f4ad707d6ffa76 | 1bd53bcecfa5726a32851534e64176de99fed514 | refs/heads/master | 2020-09-10T20:03:40.097169 | 2019-12-29T02:30:26 | 2019-12-29T02:30:26 | 221,821,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #renames all files in a directory sequentially
import os
from os import listdir
root = './healthy_corn'
images = listdir(root)
for i, image in enumerate(images):
    os.rename(f'{root}/{image}', f'{root}/hc_{i}.jpg') | [
"[email protected]"
] | |
8d9c008de26fd93ab4672533b1239457eaa8dbb5 | ae29d4dab4f10c7ccd5b106544e00fb1a11de82b | /Python/320/assignment 08/users.py | d00132a89f099d90f4a60a987f57872fd8943679 | [] | no_license | djgordon-maker/Portfolio | 9ab2d4c8b8eaf72f27c8aae20e3a900209f4360d | 9d336a29a2349b1c2f89fec93bafd4e27b20a8d9 | refs/heads/main | 2023-04-22T05:59:35.022742 | 2021-05-16T23:08:49 | 2021-05-16T23:08:49 | 367,220,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | '''
Methods to access user information for the social network project
'''
import logging
from functools import partial
from peewee import IntegrityError
from socialnetwork_model import user_table as table
from socialnetwork_model import status_table
logger = logging.getLogger('main.users')
keys = ('user_id', 'user_email', 'user_name', 'user_last_name')
user_zip = partial(zip, keys)
def make_user(user_id, email, user_name, user_last_name):
'''
    Creates a dictionary with a user's information.
    Verifies that the information meets requirements.
'''
if len(user_id) > 30:
logger.debug('user_id [%s] cannot be longer than 30 characters', user_id)
return None
if len(user_name) > 30:
logger.debug('user_name [%s] cannot be longer than 30 characters', user_name)
return None
if len(user_last_name) > 100:
        logger.debug('user_last_name [%s] cannot be longer than 100 characters', user_last_name)
return None
return dict(user_zip((user_id, email, user_name, user_last_name)))
def add_user(user_id, email, user_name, user_last_name):
'''
Adds a new user to the dataset
'''
new_user = make_user(user_id, email, user_name, user_last_name)
if not new_user:
logger.error('User %s did not meet requirements', user_id)
return False
try:
table.insert(**new_user)
except IntegrityError:
logger.error('User %s already exists', user_id)
return False
    logger.info('User %s successfully added', user_id)
return True
def modify_user(user_id, email, user_name, user_last_name):
'''
    Modifies an existing user
'''
exists = table.find_one(user_id=user_id)
if not exists:
logger.error('User %s does not exist', user_id)
return False
edit_user = make_user(user_id, email, user_name, user_last_name)
if not edit_user:
logger.error('User %s did not meet requirements', user_id)
return False
table.update(columns=['user_id'], **edit_user)
    logger.info('User %s successfully modified', user_id)
return True
def delete_user(user_id):
'''
Deletes an existing user
'''
exists = table.find_one(user_id=user_id)
if not exists:
logger.error('User %s does not exist', user_id)
return False
table.delete(user_id=user_id)
# Delete any statuses belonging to the deleted user
status_table.delete(user_id=user_id)
    logger.info('User %s successfully deleted', user_id)
return True
def search_user(user_id):
'''
Searches for user data
Returns an empty dictionary if user is not found
'''
user = table.find_one(user_id=user_id)
if not user:
logger.info('User %s not found', user_id)
return dict(zip(keys, (None, None, None, None)))
    logger.info('User %s successfully found', user_id)
return user
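# Usage sketch (the dataset-backed tables come from socialnetwork_model, which
# is assumed to be configured elsewhere):
#
#   add_user('evmiles97', '[email protected]', 'Eve', 'Miles')
#   modify_user('evmiles97', '[email protected]', 'Eve', 'Miles')
#   search_user('evmiles97')   # -> dict keyed by user_id, user_email, ...
#   delete_user('evmiles97')   # also removes the user's statuses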
| [
"[email protected]"
] | |
2420bf8c3d936774c07e187dce907a62f2186a17 | d1aa6e7d5631d7806531660febbd1f856eaeece7 | /python/paddle/distributed/fleet/utils/fs.py | 7ea639d70e641e550910ec3f8740a11a51a623bd | [
"Apache-2.0"
] | permissive | gongweibao/Paddle | 510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77 | 60f9c60cd8196c66c391d79c35d341e9072f8838 | refs/heads/develop | 2023-03-13T17:43:35.675875 | 2022-09-20T08:46:15 | 2022-09-20T08:46:15 | 82,279,237 | 3 | 2 | Apache-2.0 | 2021-05-26T06:17:43 | 2017-02-17T09:16:16 | Python | UTF-8 | Python | false | false | 46,491 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import multiprocessing
from datetime import datetime
import re
import copy
import errno
import time
import logging
import six
import abc
import paddle.fluid as fluid
from paddle.fluid import core
import functools
import shutil
__all__ = []
class ExecuteError(Exception):
pass
class FSFileExistsError(Exception):
pass
class FSFileNotExistsError(Exception):
pass
class FSTimeOut(Exception):
pass
class FSShellCmdAborted(ExecuteError):
pass
class FS(object):
@abc.abstractmethod
def ls_dir(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def is_file(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def is_dir(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def is_exist(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def upload(self, local_path, fs_path):
raise NotImplementedError
@abc.abstractmethod
def download(self, fs_path, local_path):
raise NotImplementedError
@abc.abstractmethod
def mkdirs(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def delete(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def need_upload_download(self):
raise NotImplementedError
@abc.abstractmethod
def rename(self, fs_src_path, fs_dst_path):
raise NotImplementedError
@abc.abstractmethod
def mv(self, fs_src_path, fs_dst_path, overwrite=False, test_exists=False):
raise NotImplementedError
@abc.abstractmethod
def upload_dir(self, local_dir, dest_dir):
raise NotImplementedError
@abc.abstractmethod
def list_dirs(self, fs_path):
raise NotImplementedError
@abc.abstractmethod
def touch(self, fs_path, exist_ok=True):
raise NotImplementedError
@abc.abstractmethod
def cat(self, fs_path=None):
raise NotImplementedError
class LocalFS(FS):
"""
    A tool for the local file system.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
subdirs, files = client.ls_dir("./")
"""
def ls_dir(self, fs_path):
"""
        List directories and files under `fs_path` .
Args:
fs_path(str): The local file path.
Returns:
Tuple: Return a 2-tuple, the first is a list of all its subdirectories,
            and the second is a list of all its subfiles, e.g. ([subdirname1, subdirname2, ...], [filename1, filename2, ...]).
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
subdirs, files = client.ls_dir("./")
"""
if not self.is_exist(fs_path):
return [], []
dirs = []
files = []
for f in os.listdir(fs_path):
if os.path.isdir(fs_path + "/" + f):
dirs.append(f)
else:
files.append(f)
return dirs, files
def mkdirs(self, fs_path):
"""
Create a local directory.
Args:
fs_path(str): The local directory path.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.mkdirs("test_mkdirs")
client.delete("test_mkdirs")
"""
assert not os.path.isfile(fs_path), "{} is already a file".format(
fs_path)
os.system("mkdir -p {}".format(fs_path))
def rename(self, fs_src_path, fs_dst_path):
"""
Rename the file.
Args:
fs_src_path(str): The actual name of the file or directory
fs_dst_path(str): The new name of the file or directory.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.touch("test_rename_src")
print(client.is_exists("test_rename_src")) # True
client.rename("test_rename_src", "test_rename_dst")
print(client.is_exists("test_rename_src")) # False
print(client.is_exists("test_rename_dst")) # True
client.delete("test_rename_dst")
"""
os.rename(fs_src_path, fs_dst_path)
def _rmr(self, fs_path):
shutil.rmtree(fs_path)
def _rm(self, fs_path):
os.remove(fs_path)
def delete(self, fs_path):
"""
Delete the local file path, whether it's a file or directory.
Args:
fs_path(str): The local file path.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.mkdirs("test_localFS_mkdirs")
client.delete("test_localFS_mkdirs")
"""
if not self.is_exist(fs_path):
return
if os.path.isfile(fs_path):
return self._rm(fs_path)
return self._rmr(fs_path)
def need_upload_download(self):
return False
def is_file(self, fs_path):
"""
Whether the local file path is a file.
Args:
fs_path(str): The local file path.
Returns:
Bool: Return true if the path exists and it's a file, otherwise return false.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.touch("test_is_file")
print(client.is_file("test_is_file")) # True
client.delete("test_is_file")
"""
return os.path.isfile(fs_path)
def is_dir(self, fs_path):
"""
Whether the local file path is a directory.
Args:
fs_path(str): The local file path.
Returns:
Bool: Return true if the path exists and it's a directory, otherwise return false.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.mkdirs("test_is_dir")
print(client.is_dir("test_is_file")) # True
client.delete("test_is_dir")
"""
return os.path.isdir(fs_path)
def is_exist(self, fs_path):
"""
Whether the local file path exists.
Args:
fs_path(str): The local file path.
Returns:
            Bool: Whether it's a file or directory, return true if the path exists,
otherwise return false.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
                ret = client.is_exist("test_is_exist")
"""
return os.path.exists(fs_path)
def touch(self, fs_path, exist_ok=True):
"""
Create a local file.
Args:
fs_path(str): The local file path.
exist_ok(bool): When `fs_path` exists, if `exist_ok` is set false,
program will throw an Exception. Default is true.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.touch("test_touch")
client.delete("test_touch")
"""
if self.is_exist(fs_path):
if exist_ok:
return
raise FSFileExistsError
os.system("touch {}".format(fs_path))
def mv(self, src_path, dst_path, overwrite=False, test_exists=False):
"""
Move a local file or directory from `src_path` to `dst_path` .
Args:
src_path(str): Name of the file or directory, that's needed to be moved.
dst_path(str): Name of the file or directory to which to move to.
overwrite(bool): Whether to re-write `dst_path` if that exists. Default is False.
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
client.touch("test_mv_src")
client.mv("test_mv_src", "test_mv_dst")
client.delete("test_mv_dst")
"""
if not self.is_exist(src_path):
raise FSFileNotExistsError
if overwrite and self.is_exist(dst_path):
self.delete(dst_path)
if self.is_exist(dst_path):
raise FSFileExistsError
return self.rename(src_path, dst_path)
def list_dirs(self, fs_path):
"""
        Only list directories under `fs_path` .
Args:
fs_path(str): The local file path.
Returns:
List: A list of all its subdirectories, e.g. [subdirname1, subdirname1, ...].
Examples:
.. code-block:: python
from paddle.distributed.fleet.utils import LocalFS
client = LocalFS()
subdirs = client.list_dirs("./")
"""
if not self.is_exist(fs_path):
return []
dirs = [
f for f in os.listdir(fs_path) if os.path.isdir(fs_path + "/" + f)
]
return dirs
def _handle_errors(max_time_out=None):
def decorator(f):
@functools.wraps(f)
def handler(*args, **kwargs):
o = args[0]
time_out = max_time_out
if time_out is None:
time_out = float(o._time_out) / 1000.0
else:
time_out /= 1000.0
inter = float(o._sleep_inter) / 1000.0
start = time.time()
last_print_time = start
while True:
try:
return f(*args, **kwargs)
# important: only ExecuteError need to retry
except ExecuteError as e:
if time.time() - start >= time_out:
raise FSTimeOut("args:{} timeout:{}".format(
args,
time.time() - start))
time.sleep(inter)
if time.time() - last_print_time > 30:
print("hadoop operator timeout:args:{} timeout:{}".format(
args,
time.time() - start))
last_print_time = time.time()
return handler
return decorator
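# Retry semantics: a method decorated with @_handle_errors() is retried on
# ExecuteError, sleeping `_sleep_inter` ms between attempts, until `_time_out`
# ms (or the decorator's max_time_out argument, when given) have elapsed, at
# which point FSTimeOut is raised.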
class HDFSClient(FS):
"""
A tool of HDFS.
Args:
hadoop_home(str): Hadoop home.
configs(dict): Hadoop config. It is a dictionary and needs to contain the
keys: "fs.default.name" and "hadoop.job.ugi".
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls_dir("hdfs:/test_hdfs_client")
"""
def __init__(
self,
hadoop_home,
configs,
time_out=5 * 60 * 1000, # ms
sleep_inter=1000): # ms
self.pre_commands = []
hadoop_bin = '%s/bin/hadoop' % hadoop_home
self.pre_commands.append(hadoop_bin)
dfs = 'fs'
self.pre_commands.append(dfs)
if configs:
for k, v in six.iteritems(configs):
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
self._time_out = time_out
self._sleep_inter = sleep_inter
self._base_cmd = " ".join(self.pre_commands)
self._bd_err_re = re.compile(
r'\s?responseErrorMsg\s?\:.*, errorCode\:\s?[0-9]+, path\:')
def _run_cmd(self, cmd, redirect_stderr=False, retry_times=5):
exe_cmd = "{} -{}".format(self._base_cmd, cmd)
ret = 0
output = None
retry_sleep_second = 3
for x in range(retry_times + 1):
ret, output = core.shell_execute_cmd(exe_cmd, 0, 0, redirect_stderr)
ret = int(ret)
if ret == 0:
break
time.sleep(retry_sleep_second)
if ret == 134:
raise FSShellCmdAborted(cmd)
return ret, output.splitlines()
@_handle_errors()
def list_dirs(self, fs_path):
"""
        Only list directories under `fs_path` .
Args:
fs_path(str): The HDFS file path.
Returns:
            List: A list of all its subdirectories, e.g. [subdirname1, subdirname2, ...].
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
subdirs = client.list_dirs("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return []
dirs, files = self._ls_dir(fs_path)
return dirs
@_handle_errors()
def ls_dir(self, fs_path):
"""
        List directories and files under `fs_path` .
Args:
fs_path(str): The HDFS file path.
Returns:
Tuple: Return a 2-tuple, the first element is the list of all its subdirectories,
            and the second one is the list of all its subfiles, e.g. ([subdirname1, subdirname2, ...], [filename1, filename2, ...]).
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
subdirs, files = client.ls_dir("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return [], []
return self._ls_dir(fs_path)
def _ls_dir(self, fs_path):
cmd = "ls {}".format(fs_path)
ret, lines = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
dirs = []
files = []
for line in lines:
arr = line.split()
if len(arr) != 8:
continue
p = os.path.basename(arr[7])
if arr[0][0] == 'd':
dirs.append(p)
else:
files.append(p)
return dirs, files
def _test_match(self, lines):
for l in lines:
m = self._bd_err_re.match(l)
            if m is not None:
return m
return None
@_handle_errors()
def is_dir(self, fs_path):
"""
Whether the remote HDFS path is a directory.
Args:
fs_path(str): The HDFS file path.
Returns:
Bool: Return true if the path exists and it's a directory, otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
ret = client.is_file("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return False
return self._is_dir(fs_path)
def _is_dir(self, fs_path):
cmd = "test -d {}".format(fs_path, redirect_stderr=True)
ret, lines = self._run_cmd(cmd, retry_times=1)
if ret:
# other error
if self._test_match(lines):
print('raise exception: ')
print('\n'.join(lines))
raise ExecuteError(cmd)
return False
return True
def is_file(self, fs_path):
"""
Whether the remote HDFS path is a file.
Args:
fs_path(str): The HDFS file path.
Returns:
Bool: Return true if the path exists and it's a file, otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
ret = client.is_file("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return False
return not self._is_dir(fs_path)
@_handle_errors()
def is_exist(self, fs_path):
"""
Whether the remote HDFS path exists.
Args:
fs_path(str): The hdfs file path.
Returns:
            Bool: Whether it's a file or directory, return true if the path exists,
otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
ret = client.is_exist("hdfs:/test_hdfs_client")
"""
cmd = "test -e {} ".format(fs_path)
ret, out = self._run_cmd(cmd, redirect_stderr=True, retry_times=1)
if ret != 0:
return False
return True
def upload_dir(self, local_dir, dest_dir, overwrite=False):
"""
upload dir to hdfs
Args:
local_dir(str): local dir
dest_dir(str): hdfs dest dir
            overwrite(bool): whether to overwrite the destination if it already exists
Returns:
return code
"""
local_dir = local_dir.rstrip("/")
dest_dir = dest_dir.rstrip("/")
local_basename = os.path.basename(local_dir)
if self.is_exist(dest_dir + "/" + local_basename) and overwrite:
self.delete(dest_dir + "/" + local_basename)
if not self.is_exist(dest_dir):
self.mkdirs(dest_dir)
self._try_upload(local_dir, dest_dir)
# can't retry
def upload(self, local_path, fs_path, multi_processes=5, overwrite=False):
"""
Upload the local path to remote HDFS.
Args:
local_path(str): The local path.
fs_path(str): The HDFS path.
            multi_processes(int|5): the number of processes used to upload data at the same time, default=5
            overwrite(bool|False): whether to overwrite the file on HDFS if it already exists, default=False
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.upload("test_hdfs_client", "hdfs:/test_hdfs_client")
"""
def __subprocess_upload(hdfs_path_single, datas):
for data in datas:
self._try_upload(data, hdfs_path_single)
def get_local_files(path):
"""
get local files
Args:
path(str): local path
Returns:
list of local files
"""
rlist = []
if not os.path.exists(path):
return rlist
if os.path.isdir(path):
for file in os.listdir(path):
t = os.path.join(path, file)
rlist.append(t)
else:
rlist.append(path)
return rlist
local = LocalFS()
if not local.is_exist(local_path):
raise FSFileNotExistsError("{} not exists".format(local_path))
all_files = get_local_files(local_path)
if not all_files:
print("there are nothing need to upload, function exit")
return
if self.is_exist(fs_path) and overwrite:
self.delete(fs_path)
self.mkdirs(fs_path)
procs = []
for i in range(multi_processes):
process_datas = self._split_files(all_files, i, multi_processes)
p = multiprocessing.Process(target=__subprocess_upload,
args=(fs_path, process_datas))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
@_handle_errors()
def _try_upload(self, local_path, fs_path):
cmd = "put {} {}".format(local_path, fs_path)
ret = 0
try:
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
except Exception as e:
self.delete(fs_path)
raise e
# can't retry
def download(self, fs_path, local_path, multi_processes=5, overwrite=False):
"""
Download remote HDFS path to the local.
Args:
fs_path(str): The HDFS path.
local_path(str): The local path.
            multi_processes(int|1): the number of processes used to download data at the same time, default=1
            overwrite(bool|False): whether to overwrite the local path if it already exists, default=False
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.download("hdfs:/test_hdfs_client", "./")
"""
def __subprocess_download(local_path, datas):
"""
download file from HDFS
Args:
local_path(str): the local file path
datas(str): the hdfs file path list
"""
for data in datas:
self._try_download(data, local_path)
if not self.is_exist(fs_path):
raise FSFileNotExistsError("{} not exits".format(fs_path))
# download file
if self.is_file(fs_path):
return self._try_download(fs_path, local_path)
# download dir
dirs, all_filenames = self.ls_dir(fs_path)
all_files = [fs_path + "/" + i for i in all_filenames]
all_files.extend([fs_path + "/" + i for i in dirs])
procs = []
for i in range(multi_processes):
process_datas = self._split_files(all_files, i, multi_processes)
p = multiprocessing.Process(target=__subprocess_download,
args=(local_path, process_datas))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
@_handle_errors()
def _try_download(self, fs_path, local_path):
cmd = "get {} {}".format(fs_path, local_path)
ret = 0
try:
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
except Exception as e:
local_fs = LocalFS()
local_fs.delete(local_path)
raise e
@_handle_errors()
def mkdirs(self, fs_path):
"""
Create a remote HDFS directory.
Args:
fs_path(str): The HDFS directory path.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.mkdirs("hdfs:/test_hdfs_client")
"""
if self.is_exist(fs_path):
return
out_hdfs = False
cmd = "mkdir {} ".format(fs_path)
ret, out = self._run_cmd(cmd, redirect_stderr=True)
if ret != 0:
for l in out:
if "No such file or directory" in l:
out_hdfs = True
break
if not out_hdfs:
raise ExecuteError(cmd)
if out_hdfs and not self.is_exist(fs_path):
cmd = "mkdir -p {}".format(fs_path)
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
def mv(self, fs_src_path, fs_dst_path, overwrite=False, test_exists=True):
"""
Move a remote HDFS file or directory from `fs_src_path` to `fs_dst_path` .
Args:
fs_src_path(str): Name of the file or directory, that's needed to be moved.
fs_dst_path(str): Name of the file or directory to which to move to.
overwrite(bool): Whether to re-write `fs_dst_path` if that exists. Default is False.
            test_exists(bool): Check the existence of `fs_src_path` and `fs_dst_path` . When `test_exists` is set true, if `fs_src_path` doesn't exist or `fs_dst_path` exists, program will throw an Exception.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.mv("hdfs:/test_hdfs_client", "hdfs:/test_hdfs_client2")
"""
if overwrite and self.is_exist(fs_dst_path):
self.delete(fs_dst_path)
if test_exists:
if not self.is_exist(fs_src_path):
raise FSFileNotExistsError(
"{} is not exists".format(fs_src_path))
if self.is_exist(fs_dst_path):
raise FSFileExistsError("{} exists already".format(fs_dst_path))
return self._try_mv(fs_src_path, fs_dst_path)
@_handle_errors()
def _try_mv(self, fs_src_path, fs_dst_path):
cmd = "mv {} {}".format(fs_src_path, fs_dst_path)
ret = 0
try:
ret, _ = self._run_cmd(cmd, retry_times=1)
if ret != 0:
raise ExecuteError(cmd)
except Exception as e:
if not self.is_exist(fs_src_path) and \
self.is_exist(fs_dst_path):
return
raise e
def _rmr(self, fs_path):
cmd = "rmr {}".format(fs_path)
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
def _rm(self, fs_path):
cmd = "rm {}".format(fs_path)
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
@_handle_errors()
def delete(self, fs_path):
"""
Delete a remote HDFS path, whether it's a file or directory.
Args:
fs_path(str): The HDFS file path.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.delete("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return
is_dir = self._is_dir(fs_path)
if is_dir:
return self._rmr(fs_path)
return self._rm(fs_path)
def touch(self, fs_path, exist_ok=True):
"""
Create a remote HDFS file.
Args:
fs_path(str): The HDFS file path.
exist_ok(bool): When `fs_path` exists, if `exist_ok` is set false,
program will throw an Exception. Default is true.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.touch("hdfs:/test_hdfs_client")
"""
if self.is_exist(fs_path):
if exist_ok:
return
raise FSFileExistsError
return self._touchz(fs_path)
@_handle_errors()
def _touchz(self, fs_path):
cmd = "touchz {}".format(fs_path)
ret, _ = self._run_cmd(cmd)
if ret != 0:
raise ExecuteError(cmd)
def need_upload_download(self):
return True
def cat(self, fs_path=None):
"""
Cat a remote HDFS file.
Args:
fs_path(str): The HDFS file path.
Returns:
file content
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.cat("hdfs:/test_hdfs_client")
"""
if self.is_file(fs_path):
output = self._try_cat(fs_path)
return "\n".join(output)
else:
return ""
@_handle_errors()
def _try_cat(self, fs_path):
cmd = "cat {}".format(fs_path)
ret, output = self._run_cmd(cmd, retry_times=1)
if ret != 0:
raise ExecuteError(cmd)
return output
def _split_files(self, files, trainer_id, trainers):
"""
split file list
Args:
files(list): file list
trainer_id(int): trainer mpi rank id
trainers(int): all trainers num
Returns:
fileist(list): file list of current trainer
"""
remainder = len(files) % trainers
blocksize = len(files) // trainers
blocks = [blocksize] * trainers
for i in range(remainder):
blocks[i] += 1
trainer_files = [[]] * trainers
begin = 0
for i in range(trainers):
trainer_files[i] = files[begin:begin + blocks[i]]
begin += blocks[i]
return trainer_files[trainer_id]
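    # Worked example: _split_files(list("abcdefghij"), trainer_id=1, trainers=3)
    # yields blocks of sizes [4, 3, 3]; trainer 1 receives files[4:7], i.e.
    # ['e', 'f', 'g'].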
def list_files_info(self, path_list):
"""
list_files return file path and size
Args:
path_list(list): file list
Returns:
fileist(list): file list with file path and size
"""
if len(path_list) <= 0:
return []
file_list = []
        # concatenating the file list speeds up 'hadoop ls'
str_concat = ""
for path in path_list:
str_concat += path + " "
cmd = "ls " + str_concat + " | awk '{if ($8 != \"\") {print $5\" \"$8 }}'"
ret, lines = self._run_cmd(cmd)
        if len(lines) == 0:
            logging.warning("list_files empty, path[%s]", path_list)
            return []
for line in lines:
arr = line.split(' ')
if len(arr) < 2:
continue
file_path = arr[1]
file_size = int(arr[0])
file_list.append({'path': file_path, 'size': file_size})
return file_list
class AFSClient(FS):
"""
A tool of AFS. Use AfsWrapper.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.ls_dir("hdfs:/test_hdfs_client")
"""
def __init__(
self,
time_out=5 * 60 * 1000, # ms
sleep_inter=1000): # ms
self._fs = core.AfsWrapper()
self._time_out = time_out
def init(self, fs_name, fs_user, fs_passwd, fs_conf):
self._fs.init(fs_name, fs_user, fs_passwd, fs_conf)
def list_dirs(self, fs_path):
"""
        Only list directories under `fs_path` .
Args:
fs_path(str): The HDFS file path.
Returns:
            List: A list of all its subdirectories, e.g. [subdirname1, subdirname2, ...].
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
subdirs = client.list_dirs("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return []
# TODO:fengdanlei
dirs, files = self._ls_dir(fs_path)
return dirs
def ls_dir(self, fs_path):
"""
        List directories and files under `fs_path` .
Args:
fs_path(str): The HDFS file path.
Returns:
Tuple: Return a 2-tuple, the first element is the list of all its subdirectories,
            and the second one is the list of all its subfiles, e.g. ([subdirname1, subdirname2, ...], [filename1, filename2, ...]).
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
subdirs, files = client.ls_dir("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return [], []
return self._ls_dir(fs_path)
def _ls_dir(self, fs_path):
files = self._fs.list(fs_path)
dirs = [fs_path]
return dirs, files
def is_dir(self, fs_path):
"""
Whether the remote HDFS path is a directory.
Args:
fs_path(str): The HDFS file path.
Returns:
Bool: Return true if the path exists and it's a directory, otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
ret = client.is_file("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return False
return self._is_dir(fs_path)
def _is_dir(self, fs_path):
list_path = self._fs.list(fs_path)
if (len(list_path)) > 0:
return True
else:
return False
def is_file(self, fs_path):
"""
Whether the remote HDFS path is a file.
Args:
fs_path(str): The HDFS file path.
Returns:
Bool: Return true if the path exists and it's a file, otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
ret = client.is_file("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return False
return not self._is_dir(fs_path)
def is_exist(self, fs_path):
"""
Whether the remote HDFS path exists.
Args:
fs_path(str): The hdfs file path.
Returns:
            Bool: Whether it's a file or directory, return true if the path exists,
otherwise return false.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
ret = client.is_exist("hdfs:/test_hdfs_client")
"""
return self._fs.exist(fs_path)
def upload_dir(self, local_dir, dest_dir, overwrite=False):
"""
upload dir to hdfs
Args:
local_dir(str): local dir
dest_dir(str): hdfs dest dir
            overwrite(bool): whether to overwrite the destination if it already exists
Returns:
return code
"""
local_dir = local_dir.rstrip("/")
dest_dir = dest_dir.rstrip("/")
local_basename = os.path.basename(local_dir)
if self.is_exist(dest_dir + "/" + local_basename) and overwrite:
self.delete(dest_dir + "/" + local_basename)
if not self.is_exist(dest_dir):
self.mkdirs(dest_dir)
self._fs.upload(local_dir, dest_dir)
# can't retry
def upload(self, local_path, fs_path, multi_processes=1, overwrite=False):
"""
Upload the local path to remote HDFS.
Args:
local_path(str): The local path.
fs_path(str): The HDFS path.
            multi_processes(int|1): the number of processes used to upload data at the same time, default=1
            overwrite(bool|False): whether to overwrite the file on HDFS if it already exists, default=False
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.upload("test_hdfs_client", "hdfs:/test_hdfs_client")
"""
local = LocalFS()
if not local.is_exist(local_path):
raise FSFileNotExistsError("{} not exists".format(local_path))
self._fs.upload(local_path, fs_path)
def download(self, fs_path, local_path, multi_processes=1, overwrite=False):
"""
Download remote HDFS path to the local.
Args:
fs_path(str): The HDFS path.
local_path(str): The local path.
            multi_processes(int|1): the number of processes used to download data at the same time, default=1
            overwrite(bool|False): whether to overwrite the local path if it already exists, default=False
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.download("hdfs:/test_hdfs_client", "./")
"""
def __subprocess_download(local_path, datas):
"""
download file from HDFS
Args:
local_path(str): the local file path
datas(str): the hdfs file path list
"""
for data in datas:
self._fs.download(local_path, data)
if not self.is_exist(fs_path):
raise FSFileNotExistsError("{} not exits".format(fs_path))
# download file
if self.is_file(fs_path):
return self._fs.download(local_path, fs_path)
# download dir
_, all_filenames = self.ls_dir(fs_path)
all_files = [fs_path + i for i in all_filenames]
procs = []
for i in range(multi_processes):
process_datas = self._split_files(all_files, i, multi_processes)
p = multiprocessing.Process(target=__subprocess_download,
args=(local_path, process_datas))
procs.append(p)
p.start()
# complete the processes
for proc in procs:
proc.join()
def mkdirs(self, fs_path):
"""
Create a remote HDFS directory.
Args:
fs_path(str): The HDFS directory path.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.mkdirs("hdfs:/test_hdfs_client")
"""
if self.is_exist(fs_path):
return
self._fs.mkdir(fs_path)
def mv(self, fs_src_path, fs_dst_path, overwrite=False, test_exists=True):
"""
Move a remote HDFS file or directory from `fs_src_path` to `fs_dst_path` .
Args:
fs_src_path(str): Name of the file or directory, that's needed to be moved.
fs_dst_path(str): Name of the file or directory to which to move to.
overwrite(bool): Whether to re-write `fs_dst_path` if that exists. Default is False.
            test_exists(bool): Check the existence of `fs_src_path` and `fs_dst_path` . When `test_exists` is set true, if `fs_src_path` doesn't exist or `fs_dst_path` exists, program will throw an Exception.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.mv("hdfs:/test_hdfs_client", "hdfs:/test_hdfs_client2")
"""
if overwrite and self.is_exist(fs_dst_path):
self.delete(fs_dst_path)
if test_exists:
if not self.is_exist(fs_src_path):
raise FSFileNotExistsError(
"{} is not exists".format(fs_src_path))
if self.is_exist(fs_dst_path):
raise FSFileExistsError("{} exists already".format(fs_dst_path))
self._fs.mv(fs_src_path, fs_dst_path)
def delete(self, fs_path):
"""
Delete a remote HDFS path, whether it's a file or directory.
Args:
fs_path(str): The HDFS file path.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import HDFSClient
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.delete("hdfs:/test_hdfs_client")
"""
if not self.is_exist(fs_path):
return
self._fs.remove(fs_path)
def touch(self, fs_path, exist_ok=True):
"""
Create a remote HDFS file.
Args:
fs_path(str): The HDFS file path.
exist_ok(bool): When `fs_path` exists, if `exist_ok` is set false,
program will throw an Exception. Default is true.
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.touch("hdfs:/test_hdfs_client")
"""
if self.is_exist(fs_path):
if exist_ok:
return
raise FSFileExistsError
return self._fs.touchz(fs_path)
def need_upload_download(self):
return True
def cat(self, fs_path=None):
"""
Cat a remote HDFS file.
Args:
fs_path(str): The HDFS file path.
Returns:
file content
Examples:
.. code-block:: text
from paddle.distributed.fleet.utils import AFSClient
client = AFSClient()
client.init("hdfs://xxx.hadoop.com:54310", "hello", "hello123", "./fs_conf")
client.cat("hdfs:/test_hdfs_client")
"""
if self.is_file(fs_path):
return self._fs.cat(fs_path)
else:
return ""
def _split_files(self, files, trainer_id, trainers):
"""
split file list
Args:
files(list): file list
trainer_id(int): trainer mpi rank id
trainers(int): all trainers num
Returns:
fileist(list): file list of current trainer
"""
remainder = len(files) % trainers
blocksize = len(files) // trainers
blocks = [blocksize] * trainers
for i in range(remainder):
blocks[i] += 1
trainer_files = [[]] * trainers
begin = 0
for i in range(trainers):
trainer_files[i] = files[begin:begin + blocks[i]]
begin += blocks[i]
return trainer_files[trainer_id]
| [
"[email protected]"
] | |
e15d35e8e6957d5fc37271c0440d3b5e2eeb58c7 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pj5yRacNC8huzhy85_6.py | 1c295171c1a8bc2b686e83e74d269d75cd0a86f4 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py |
def shhh(txt):
return "\""+txt.capitalize()+"\""+', whispered Edabit.'
| [
"[email protected]"
] | |
7c26ba48a445a3be021bb7879617d75244d366bf | d8c5f1d5f5b7d222834d233db26d8b03e37bdfa3 | /clients/kratos/python/test/test_submit_self_service_registration_flow_with_password_method_body.py | f66db886c346ff8c345df7d0e38ed686861526e2 | [
"Apache-2.0"
] | permissive | kolotaev/sdk | 31a9585720c3649b8830cf054fc7e404abe8e588 | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | refs/heads/master | 2023-07-09T21:36:44.459798 | 2021-08-17T09:05:22 | 2021-08-17T09:05:22 | 397,220,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.submit_self_service_registration_flow_with_password_method_body import SubmitSelfServiceRegistrationFlowWithPasswordMethodBody
class TestSubmitSelfServiceRegistrationFlowWithPasswordMethodBody(unittest.TestCase):
"""SubmitSelfServiceRegistrationFlowWithPasswordMethodBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSubmitSelfServiceRegistrationFlowWithPasswordMethodBody(self):
"""Test SubmitSelfServiceRegistrationFlowWithPasswordMethodBody"""
# FIXME: construct object with mandatory attributes with example values
# model = SubmitSelfServiceRegistrationFlowWithPasswordMethodBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c05000978353355f219b6a9327f6d1fc60eb72d1 | dc4c96b7b986f7d1f451b6af8381fe94d787cc48 | /Nishida/WBAI_open_code/lstm/lstm.py | a9353c2af4301f1335174b43b0cf17171cecfcf9 | [
"Apache-2.0"
] | permissive | wbap/Hackathon2015 | 3aacf2addab6dd0c08a6f7c7d6776468d886a0cc | 62eb973623ac316199ebf83de30151944f38a750 | refs/heads/master | 2016-09-06T04:37:48.108544 | 2016-01-22T11:59:51 | 2016-01-22T11:59:51 | 42,838,895 | 6 | 9 | null | 2016-01-22T11:59:53 | 2015-09-21T02:15:48 | Python | UTF-8 | Python | false | false | 1,640 | py | #coding:utf-8
import numpy as np
from chainer import Variable, FunctionSet
import chainer.functions as F
class LSTM(FunctionSet):
def __init__(self,f_n_units, n_units):
super(LSTM, self).__init__(
l1_x = F.Linear(f_n_units, 4*n_units),
l1_h = F.Linear(n_units, 4*n_units),
l6 = F.Linear(n_units, f_n_units)
)
# パラメータの値を-0.08~0.08の範囲で初期化
for param in self.parameters:
param[:] = np.random.uniform(-0.08, 0.08, param.shape)
def forward_one_step(self, x_data, y_data, state, train=True,dropout_ratio=0.0):
x ,t = Variable(x_data,volatile=not train),Variable(y_data,volatile=not train)
h1_in = self.l1_x(F.dropout(x, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(F.dropout(h1, ratio=dropout_ratio, train=train))
state = {'c1': c1, 'h1': h1}
return state, F.mean_squared_error(y, t)
def predict(self, x_data, y_data, state):
x ,t = Variable(x_data,volatile=False),Variable(y_data,volatile=False)
h1_in = self.l1_x(x) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(h1)
state = {'c1': c1, 'h1': h1}
return state,F.mean_squared_error(y,t)
def make_initial_state(n_units,train = True):
return {name: Variable(np.zeros((1,n_units), dtype=np.float32),
volatile=not train)
for name in ('c1', 'h1')}
#for name in ('c1', 'h1', 'c2', 'h2', 'c3', 'h3','c4','h4','c5','h5')}
| [
"[email protected]"
] | |
aeb95cdd28029f048b32223a7ae7d673d281359d | 9e56256fddab7825573af0635f26b45855e2a2ee | /prostate_split_A1.py | 3e0e26353689db9596c5ed2608c9c92c24a2f2ef | [] | no_license | liq07lzucn/Scripts-RayStation-4.7.2 | 257a8163ee96aac4260a5d2de049ba02331d1a1a | eabc6a737dc62212d07fcf8517c341ff0c9dbdf5 | refs/heads/master | 2021-08-31T09:13:25.640927 | 2017-12-20T21:51:10 | 2017-12-20T21:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | # -*- coding: utf-8 -*-
"""
.. note::
LA VERSION LA PLUS À JOUR DE CE DOCUMENT SE TROUVE DANS DRUPAL: `Planification des prostates vmat`_
.. _`Planification des prostates vmat`: http://drupal.radonc.hmr/content/planification-des-prostates-vmat-0
Ce script à comme but de convertir un plan A1 de prostate VMAT standard en plan "split", c'est à dire
avec deux niveaux de dose dans le PTV A1. Le script prend pour acquis que le premier plan était
fait avec le script plan_prostate_A1 et que le contour WALL RECTUM existe toujours. Normalement,
le script devrait préserver les objectifs DVHs rectum tel quel, pour permettre de rapidement redémarrer
le plan en version split sans perdre les pourcentages ni les poids utilisés dans les optimisations à date.
.. warning::
IL EST FORTEMENT RECOMMANDÉ DE SAUVEGARDER VOTRE PLAN ET DE ROULER CE SCRIPT DANS UNE NOUVELLE COPIE.
.. rubric::
Étapes du script
Avant de rouler le script, il faut vérifier que les ROIs PTV A1, RECTUM, WALL RECT et RECTUM ds PTV A1 existent.
1. Remet le nombre de fractions du plan A1 à 40
2. Crée les contours PTV A1-RECTUM et WALL RECT ds PTV A1
3. Remplace l'objectif de min dose sur le PTV A1 avec un min dose sur le PTV A1-RECTUM (76 Gy à 99.5%, poids 100)
4. Ajoute un objectif min dose 73.2 Gy à RECTUM ds PTV A1 (poids 10)
5. Ajoute un objectif max dose 75 Gy à WALL RECT ds PTV A1 (poids 10)
6. Ajoute un clinical goal pour la couverture du PTV A1-RECTUM (76 Gy à 99.5%)
7. Ajoute un clinical goal pour la couverture du RECTUM ds PTV A1 (73.2 Gy à 99.5%)
.. note::
BUG CONNU: le script ne peut pas effacer le clinical goal pour la couverture du PTV A1 qui reste du plan A1 initial,
il faut l'enlever à la main.
"""
import setpath
import hmrlib.lib as lib
import prostate
with lib.RSScriptWrapper(__file__):
prostate.prostate_split_A1() | [
"[email protected]"
] | |
53fb143eff98e78c0a1780dd917372fec933c0db | 76f55428277f1a951f648b202d5c6757bf9c446a | /youtube/get_video_thumb.py | 129240511f24bbbf347a717ed73d4899f4c87727 | [] | no_license | zdenekhynek/browser_snapshots | 5003378e01d73dbb877771097ad5ddb0a9aec997 | b63937032a0620f47b1f49fdd8e87ffd3a45a2d2 | refs/heads/master | 2023-01-30T14:10:48.321789 | 2018-12-02T22:39:32 | 2018-12-02T23:19:21 | 113,761,833 | 0 | 0 | null | 2023-01-12T23:55:40 | 2017-12-10T15:39:13 | Python | UTF-8 | Python | false | false | 173 | py | from youtube.get_id_from_url import get_id_from_url
def get_video_thumb(url):
video_id = get_id_from_url(url)
return 'https://img.youtube.com/vi/%s/0.jpg' % (video_id)
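# Example (assuming get_id_from_url extracts the video id from the URL):
#   get_video_thumb('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
#   -> 'https://img.youtube.com/vi/dQw4w9WgXcQ/0.jpg'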
| [
"[email protected]"
] |