Column summary (name, dtype, observed range or distinct values):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |

Data rows (one record per file, columns in the order above):

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0f23530fb3cbc50111cfe4a488934543ebda0da | c7d7dfa5ac23b940e852a67155364439d9069486 | /widget_image_tools/tests/test_data_get.py | e4b4e188f3230a7c9c77643e2405c1d4efe1f91c | [] | no_license | shurshilov/odoo | d163f6c939bcbfb36bdf83eeeeffca368f0a4722 | 8099e62254b7f1e113be7b522585dbc352aea5a8 | refs/heads/16.0 | 2023-09-04T03:02:31.427240 | 2023-09-03T16:25:28 | 2023-09-03T16:25:28 | 89,852,559 | 20 | 43 | null | 2023-09-03T06:30:22 | 2017-04-30T13:32:08 | JavaScript | UTF-8 | Python | false | false | 625 | py | import logging
from openerp.tests.common import HttpCase
_logger = logging.getLogger(__name__)
class TestDataGet(HttpCase):
    at_install = False
    post_install = True

    def test_data_get(self):
        test_attachment = self.env.ref("ir_attachment_url.test_url_attachment")
        self.env["ir.attachment"].search_read(
            [("id", "=", test_attachment.id)], ["id", "datas"]
        )

    def test_open_url(self):
        user_demo = self.env.ref("base.user_demo")
        url = "/web/image?model=res.users&id={}&field=image_medium".format(
            user_demo.id
        )
        self.url_open(url)
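    # Note: test_data_get reads the binary `datas` of a URL-backed attachment via
    # search_read, while test_open_url fetches a user image through the /web/image
    # controller route; both are smoke tests that should complete without raising.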
| [
"[email protected]"
] | |
92f627735e41204e95c82484985badde9791b02d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02271/s655155562.py | 70e2e74705b0aa77c4c0f55bb17c1e3a42de475a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | n = int(input())
SET1 = {0}
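# Exhaustive subset-sum: starting from {0}, each input number a is added to every
# sum collected so far, so after these loops SET1 holds the sum of every subset.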
for a in map(int, input().split()):
    for b in tuple(SET1):
        SET1.add(a + b)
input()
for m in map(int, input().split()):
    print('yes' if m in SET1 else 'no')
| [
"[email protected]"
] | |
b9168ca4256f538357f56f9175e1e1b507d98538 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_80/run_cfg.py | c87bf367bb3c41826a53f12310721f39747c57ee | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_5.root',
'/store/cmst3/user/cmgtools/CMG/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_50.root',
'/store/cmst3/user/cmgtools/CMG/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_51.root',
'/store/cmst3/user/cmgtools/CMG/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_52.root',
'/store/cmst3/user/cmgtools/CMG/VBF_HToTauTau_M-120_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_53.root')
)
| [
"[email protected]"
] | |
68add924debc9d40e2c9e6e8b0177bc5b786cd89 | 2adcbbbecf90e4fbb89755a8a68f86b8fe70910a | /pythinkutils/aio/jwt/tornado/handler/AuthHandler.py | cf9795dfe29d2da870a4610c81273d1f7a526deb | [] | no_license | ThinkmanWang/ThinkEventTrack | 53e3b205787c2fcefb20d24fef0f98465dcb925e | b65072a3236a183c1cc1ac835cd79f2f46fd10d7 | refs/heads/master | 2023-08-03T11:38:33.099014 | 2021-09-19T13:17:39 | 2021-09-19T13:17:39 | 406,753,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # -*- coding: utf-8 -*-
from pythinkutils.aio.jwt.tornado.handler.BaseHandler import BaseHandler
from pythinkutils.common.StringUtils import *
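# Illustrative note (not the project's actual token logic): the stub methods in the
# handler below could implement a standard JWT flow with PyJWT along these lines;
# the payload fields, lifetime and secret handling here are assumptions:
#
#     import jwt, time
#     token = jwt.encode({"appid": szAppId, "exp": int(time.time()) + 3600},
#                        szSecret, algorithm="HS256")
#     claims = jwt.decode(token, szSecret, algorithms=["HS256"])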
class AuthHandler(BaseHandler):
    async def create_token(self, szAppId, szSecret):
        pass

    async def token_valid(self):
        pass

    async def get_uid_name(self):
        pass

    async def get_userinfo(self):
        pass

    async def get_token(self):
        pass

    async def get_permission_list(self):
        pass | [
"[email protected]"
] | |
23bb6267bc9316d2c29589fa9f9f4bbc4070480d | 9084751a90f977fc722f90892e62c6596d0a26c6 | /staticpy/util/helper.py | c426b40cab25aefe97cd88114e4f10b9981420d3 | [
"BSD-3-Clause"
] | permissive | SnowWalkerJ/StaticPy | 5d3b4723cd7b78283ab95ec3021bdcf0dfe67a6c | 818b7f009af7a6040313791993f543779781dddf | refs/heads/master | 2020-07-28T17:50:21.072169 | 2020-01-14T06:21:39 | 2020-01-14T06:21:39 | 209,484,058 | 19 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | class Cls:
"""
This class is to refer to the "current" class when it hasn't been completely
defined.
Example
=======
.. code-block:: python
class A:
def __init__(self, value):
self.value: int = 1
def __add__(self, other: Cls) -> Cls:
return A(self.value + othervalue)
The signature of operator `__add__` means it adds and returns an object of type `A`
"""
| [
"[email protected]"
] | |
0afdca03ef1e24ba433fba827473bc5ce22df989 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba2475.pngMap.py | 630606503aa002216a84acfe71c6c7c22e31f61c | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba2475.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111101111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000010000000111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000010000000011111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000100001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000001011111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000001011111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111110011111111111111111111111111000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110000000011110011011100001000011000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111011100000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111100111100000001100000000000000010111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000011111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111010000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000011111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110100000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000010000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000010000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000010000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000110000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000111110000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000011111000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000001111111000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000001111111000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000011111111000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000111111111000001111111111111111',
]
| [
"[email protected]"
] | |
f4c2668459c92a992bfea23f219d566210944f98 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_imported.py | 878ac0a3c2bf379c3bee6feb4d0d7b3a98dbeee7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.verbs._import import _IMPORT
#class header
class _IMPORTED(_IMPORT, ):
    def __init__(self,):
        _IMPORT.__init__(self)
        self.name = "IMPORTED"
        self.specie = 'verbs'
        self.basic = "import"
        self.jsondata = {}
| [
"[email protected]"
] | |
525b6b2e8ce96895bb616067710c9ec0bf91b842 | a7528729c9b9de9736e550d4891ba6ae1289276a | /venv/lib/python3.8/site-packages/wget.py | c0fbb8342278909a8a64b233666d0ec9be9bdf3f | [
"MIT"
] | permissive | oguzhan2142/Youtube-Spotify-Music-Downloader | 69b6c61030bd1a5b099f58efb9bc848ec801fb29 | 3dfa51aa5e5549f3f82b87444bd27aa684a93061 | refs/heads/master | 2021-01-14T22:46:16.680870 | 2020-11-06T11:51:41 | 2020-11-06T11:51:41 | 242,784,685 | 1 | 0 | MIT | 2020-11-06T11:51:42 | 2020-02-24T16:23:47 | Python | UTF-8 | Python | false | false | 22,355 | py | #!/usr/bin/env python
"""
Download utility as an easy way to get file from the net
python -m wget <URL>
python wget.py <URL>
Downloads: http://pypi.python.org/pypi/wget/
Development: http://bitbucket.org/techtonik/python-wget/
wget.py is not option compatible with Unix wget utility,
to make command line interface intuitive for new people.
Public domain by anatoly techtonik <[email protected]>
Also available under the terms of MIT license
Copyright (c) 2010-2015 anatoly techtonik
"""
__version__ = "3.2"
import sys, shutil, os
import tempfile
import math
PY3K = sys.version_info >= (3, 0)
if PY3K:
import urllib.request as ulib
import urllib.parse as urlparse
else:
import urllib as ulib
import urlparse
# --- workarounds for Python misbehavior ---
# enable passing unicode arguments from command line in Python 2.x
# https://stackoverflow.com/questions/846850/read-unicode-characters
def win32_utf8_argv():
"""Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
strings.
Versions 2.x of Python don't support Unicode in sys.argv on
Windows, with the underlying Windows API instead replacing multi-byte
characters with '?'.
"""
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
cmd = GetCommandLineW()
argc = c_int(0)
argv = CommandLineToArgvW(cmd, byref(argc))
argnum = argc.value
sysnum = len(sys.argv)
result = []
if argnum > 0:
# Remove Python executable and commands if present
start = argnum - sysnum
for i in range(start, argnum):
result.append(argv[i].encode('utf-8'))
return result
# enable unicode output to windows console
# https://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash
def win32_unicode_console():
import codecs
from ctypes import WINFUNCTYPE, windll, POINTER, byref, c_int
from ctypes.wintypes import BOOL, HANDLE, DWORD, LPWSTR, LPCWSTR, LPVOID
original_stderr = sys.stderr
# Output exceptions in this code to original_stderr, so that we can at least see them
def _complain(message):
original_stderr.write(message if isinstance(message, str) else repr(message))
original_stderr.write('\n')
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
try:
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(("GetStdHandle", windll.kernel32))
STD_OUTPUT_HANDLE = DWORD(-11)
STD_ERROR_HANDLE = DWORD(-12)
GetFileType = WINFUNCTYPE(DWORD, DWORD)(("GetFileType", windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(("GetConsoleMode", windll.kernel32))
INVALID_HANDLE_VALUE = DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
or GetConsoleMode(handle, byref(DWORD())) == 0)
old_stdout_fileno = None
old_stderr_fileno = None
if hasattr(sys.stdout, 'fileno'):
old_stdout_fileno = sys.stdout.fileno()
if hasattr(sys.stderr, 'fileno'):
old_stderr_fileno = sys.stderr.fileno()
STDOUT_FILENO = 1
STDERR_FILENO = 2
real_stdout = (old_stdout_fileno == STDOUT_FILENO)
real_stderr = (old_stderr_fileno == STDERR_FILENO)
if real_stdout:
hStdout = GetStdHandle(STD_OUTPUT_HANDLE)
if not_a_console(hStdout):
real_stdout = False
if real_stderr:
hStderr = GetStdHandle(STD_ERROR_HANDLE)
if not_a_console(hStderr):
real_stderr = False
if real_stdout or real_stderr:
WriteConsoleW = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(("WriteConsoleW", windll.kernel32))
class UnicodeOutput:
def __init__(self, hConsole, stream, fileno, name):
self._hConsole = hConsole
self._stream = stream
self._fileno = fileno
self.closed = False
self.softspace = False
self.mode = 'w'
self.encoding = 'utf-8'
self.name = name
self.flush()
def isatty(self):
return False
def close(self):
# don't really close the handle, that would only cause problems
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
if self._hConsole is None:
try:
self._stream.flush()
except Exception as e:
_complain("%s.flush: %r from %r" % (self.name, e, self._stream))
raise
def write(self, text):
try:
if self._hConsole is None:
if not PY3K and isinstance(text, unicode):
text = text.encode('utf-8')
elif PY3K and isinstance(text, str):
text = text.encode('utf-8')
self._stream.write(text)
else:
if not PY3K and not isinstance(text, unicode):
text = str(text).decode('utf-8')
elif PY3K and not isinstance(text, str):
text = text.decode('utf-8')
remaining = len(text)
while remaining:
n = DWORD(0)
# There is a shorter-than-documented limitation on the
# length of the string passed to WriteConsoleW (see
# <http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = WriteConsoleW(self._hConsole, text, min(remaining, 10000), byref(n), None)
if retval == 0 or n.value == 0:
raise IOError("WriteConsoleW returned %r, n.value = %r" % (retval, n.value))
remaining -= n.value
if not remaining:
break
text = text[n.value:]
except Exception as e:
_complain("%s.write: %r" % (self.name, e))
raise
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception as e:
_complain("%s.writelines: %r" % (self.name, e))
raise
if real_stdout:
sys.stdout = UnicodeOutput(hStdout, None, STDOUT_FILENO, '<Unicode console stdout>')
else:
sys.stdout = UnicodeOutput(None, sys.stdout, old_stdout_fileno, '<Unicode redirected stdout>')
if real_stderr:
sys.stderr = UnicodeOutput(hStderr, None, STDERR_FILENO, '<Unicode console stderr>')
else:
sys.stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno, '<Unicode redirected stderr>')
except Exception as e:
_complain("exception %r while fixing up sys.stdout and sys.stderr" % (e,))
# --- helpers ---
def to_unicode(filename):
""":return: filename decoded from utf-8 to unicode"""
#
if PY3K:
# [ ] test this on Python 3 + (Windows, Linux)
# [ ] port filename_from_headers once this works
# [ ] add test to repository / Travis
return filename
else:
if isinstance(filename, unicode):
return filename
else:
return unicode(filename, 'utf-8')
def filename_from_url(url):
""":return: detected filename as unicode or None"""
# [ ] test urlparse behavior with unicode url
fname = os.path.basename(urlparse.urlparse(url).path)
if len(fname.strip(" \n\t.")) == 0:
return None
return to_unicode(fname)
def filename_from_headers(headers):
"""Detect filename from Content-Disposition headers if present.
http://greenbytes.de/tech/tc2231/
:param: headers as dict, list or string
:return: filename from content-disposition header or None
"""
if type(headers) == str:
headers = headers.splitlines()
if type(headers) == list:
headers = dict([x.split(':', 1) for x in headers])
cdisp = headers.get("Content-Disposition")
if not cdisp:
return None
cdtype = cdisp.split(';')
if len(cdtype) == 1:
return None
if cdtype[0].strip().lower() not in ('inline', 'attachment'):
return None
# several filename params is illegal, but just in case
fnames = [x for x in cdtype[1:] if x.strip().startswith('filename=')]
if len(fnames) > 1:
return None
name = fnames[0].split('=')[1].strip(' \t"')
name = os.path.basename(name)
if not name:
return None
return name
def filename_fix_existing(filename):
"""Expands name portion of filename with numeric ' (x)' suffix to
return filename that doesn't exist already.
"""
dirname = u'.'
name, ext = filename.rsplit('.', 1)
names = [x for x in os.listdir(dirname) if x.startswith(name)]
names = [x.rsplit('.', 1)[0] for x in names]
suffixes = [x.replace(name, '') for x in names]
# filter suffixes that match ' (x)' pattern
suffixes = [x[2:-1] for x in suffixes
if x.startswith(' (') and x.endswith(')')]
indexes = [int(x) for x in suffixes
if set(x) <= set('0123456789')]
idx = 1
if indexes:
idx += sorted(indexes)[-1]
return '%s (%d).%s' % (name, idx, ext)
# --- terminal/console output helpers ---
def get_console_width():
"""Return width of available window area. Autodetection works for
Windows and POSIX platforms. Returns 80 for others
Code from http://bitbucket.org/techtonik/python-pager
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(
console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right+1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
return (winsize[1], winsize[0])[0]
return 80
def bar_thermometer(current, total, width=80):
"""Return thermometer style progress bar string. `total` argument
can not be zero. The minimum size of bar returned is 3. Example:
[.......... ]
Control and trailing symbols (\r and spaces) are not included.
See `bar_adaptive` for more information.
"""
# number of dots on thermometer scale
avail_dots = width-2
shaded_dots = int(math.floor(float(current) / total * avail_dots))
return '[' + '.'*shaded_dots + ' '*(avail_dots-shaded_dots) + ']'
def bar_adaptive(current, total, width=80):
"""Return progress bar string for given values in one of three
styles depending on available width:
[.. ] downloaded / total
downloaded / total
[.. ]
if total value is unknown or <= 0, show bytes counter using two
adaptive styles:
%s / unknown
%s
if there is not enough space on the screen, do not display anything
returned string doesn't include control characters like \r used to
place cursor at the beginning of the line to erase previous content.
this function leaves one free character at the end of string to
avoid automatic linefeed on Windows.
"""
# process special case when total size is unknown and return immediately
if not total or total < 0:
msg = "%s / unknown" % current
if len(msg) < width: # leaves one character to avoid linefeed
return msg
if len("%s" % current) < width:
return "%s" % current
# --- adaptive layout algorithm ---
#
# [x] describe the format of the progress bar
# [x] describe min width for each data field
# [x] set priorities for each element
# [x] select elements to be shown
# [x] choose top priority element min_width < avail_width
# [x] lessen avail_width by value if min_width
# [x] exclude element from priority list and repeat
# 10% [.. ] 10/100
# pppp bbbbb sssssss
min_width = {
'percent': 4, # 100%
'bar': 3, # [.]
'size': len("%s" % total)*2 + 3, # 'xxxx / yyyy'
}
priority = ['percent', 'bar', 'size']
# select elements to show
selected = []
avail = width
for field in priority:
if min_width[field] < avail:
selected.append(field)
avail -= min_width[field]+1 # +1 is for separator or for reserved space at
# the end of line to avoid linefeed on Windows
# render
output = ''
for field in selected:
if field == 'percent':
# fixed size width for percentage
output += ('%s%%' % (100 * current // total)).rjust(min_width['percent'])
elif field == 'bar': # [. ]
# bar takes its min width + all available space
output += bar_thermometer(current, total, min_width['bar']+avail)
elif field == 'size':
# size field has a constant width (min == max)
output += ("%s / %s" % (current, total)).rjust(min_width['size'])
selected = selected[1:]
if selected:
output += ' ' # add field separator
return output
# --/ console helpers
__current_size = 0 # global state variable, which exists solely as a
# workaround against Python 3.3.0 regression
# http://bugs.python.org/issue16409
# fixed in Python 3.3.1
def callback_progress(blocks, block_size, total_size, bar_function):
"""callback function for urlretrieve that is called when connection is
created and when once for each block
draws adaptive progress bar in terminal/console
use sys.stdout.write() instead of "print,", because it allows one more
symbol at the line end without linefeed on Windows
:param blocks: number of blocks transferred so far
:param block_size: in bytes
:param total_size: in bytes, can be -1 if server doesn't return it
:param bar_function: another callback function to visualize progress
"""
global __current_size
width = min(100, get_console_width())
if sys.version_info[:3] == (3, 3, 0): # regression workaround
if blocks == 0: # first call
__current_size = 0
else:
__current_size += block_size
current_size = __current_size
else:
current_size = min(blocks*block_size, total_size)
progress = bar_function(current_size, total_size, width)
if progress:
sys.stdout.write("\r" + progress)
def detect_filename(url=None, out=None, headers=None, default="download.wget"):
"""Return filename for saving file. If no filename is detected from output
argument, url or headers, return default (download.wget)
"""
names = dict(out='', url='', headers='')
if out:
names["out"] = out or ''
if url:
names["url"] = filename_from_url(url) or ''
if headers:
names["headers"] = filename_from_headers(headers) or ''
return names["out"] or names["headers"] or names["url"] or default
def download(url, out=None, bar=bar_adaptive):
"""High level function, which downloads URL into tmp file in current
directory and then renames it to filename autodetected from either URL
or HTTP headers.
:param bar: function to track download progress (visualize etc.)
:param out: output filename or directory
:return: filename where URL is downloaded to
"""
# detect of out is a directory
outdir = None
if out and os.path.isdir(out):
outdir = out
out = None
# get filename for temp file in current directory
prefix = detect_filename(url, out)
(fd, tmpfile) = tempfile.mkstemp(".tmp", prefix=prefix, dir=".")
os.close(fd)
os.unlink(tmpfile)
# set progress monitoring callback
def callback_charged(blocks, block_size, total_size):
# 'closure' to set bar drawing function in callback
callback_progress(blocks, block_size, total_size, bar_function=bar)
if bar:
callback = callback_charged
else:
callback = None
if PY3K:
# Python 3 can not quote URL as needed
binurl = list(urlparse.urlsplit(url))
binurl[2] = urlparse.quote(binurl[2])
binurl = urlparse.urlunsplit(binurl)
else:
binurl = url
(tmpfile, headers) = ulib.urlretrieve(binurl, tmpfile, callback)
filename = detect_filename(url, out, headers)
if outdir:
filename = outdir + "/" + filename
# add numeric ' (x)' suffix if filename already exists
if os.path.exists(filename):
filename = filename_fix_existing(filename)
shutil.move(tmpfile, filename)
#print headers
return filename
usage = """\
usage: wget.py [options] URL
options:
-o --output FILE|DIR output filename or directory
-h --help
--version
"""
if __name__ == "__main__":
if len(sys.argv) < 2 or "-h" in sys.argv or "--help" in sys.argv:
sys.exit(usage)
if "--version" in sys.argv:
sys.exit("wget.py " + __version__)
# patch Python 2.x to read unicode from command line
if not PY3K and sys.platform == "win32":
sys.argv = win32_utf8_argv()
# patch Python to write unicode characters to console
if sys.platform == "win32":
win32_unicode_console()
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--output", dest="output")
(options, args) = parser.parse_args()
url = sys.argv[1]
filename = download(args[0], out=options.output)
print("")
print("Saved under %s" % filename)
r"""
features that require more tuits for urlretrieve API
http://www.python.org/doc/2.6/library/urllib.html#urllib.urlretrieve
[x] autodetect filename from URL
[x] autodetect filename from headers - Content-Disposition
http://greenbytes.de/tech/tc2231/
[ ] make HEAD request to detect temp filename from Content-Disposition
[ ] process HTTP status codes (i.e. 404 error)
http://ftp.de.debian.org/debian/pool/iso-codes_3.24.2.orig.tar.bz2
[ ] catch KeyboardInterrupt
[ ] optionally preserve incomplete file
[x] create temp file in current directory
[ ] resume download (broken connection)
[ ] resume download (incomplete file)
[x] show progress indicator
http://mail.python.org/pipermail/tutor/2005-May/038797.html
[x] do not overwrite downloaded file
[x] rename file automatically if exists
[x] optionally specify path for downloaded file
[ ] options plan
[x] -h, --help, --version (CHAOS speccy)
[ ] clpbar progress bar style
_ 30.0Mb at 3.0 Mbps eta: 0:00:20 30% [===== ]
[ ] test "bar \r" print with \r at the end of line on Windows
[ ] process Python 2.x urllib.ContentTooShortError exception gracefully
(ideally retry and continue download)
(tmpfile, headers) = urllib.urlretrieve(url, tmpfile, callback_progress)
File "C:\Python27\lib\urllib.py", line 93, in urlretrieve
return _urlopener.retrieve(url, filename, reporthook, data)
File "C:\Python27\lib\urllib.py", line 283, in retrieve
"of %i bytes" % (read, size), result)
urllib.ContentTooShortError: retrieval incomplete: got only 15239952 out of 24807571 bytes
[ ] find out if urlretrieve may return unicode headers
[ ] write files with unicode characters
https://bitbucket.org/techtonik/python-wget/issues/7/filename-issue
[x] Python 2, Windows
[x] Python 3, Windows
[ ] Linux
[ ] add automatic tests
[ ] specify unicode URL from command line
[ ] specify unicode output file from command line
[ ] test suite for unsafe filenames from url and from headers
[ ] security checks
[ ] filename_from_url
[ ] filename_from_headers
[ ] MITM redirect from https URL
[ ] https certificate check
[ ] size+hash check helpers
[ ] fail if size is known and mismatch
[ ] fail if hash mismatch
"""
| [
"[email protected]"
] | |
c3e66a8e8814e8e5285b5c62076236e5e92a2c5c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_barmaids.py | ed81aeb5d23f31b1164a1e5b2abbd9d585783d17 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#class header
class _BARMAIDS():
    def __init__(self,):
        self.name = "BARMAIDS"
        self.definitions = barmaid
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['barmaid']
| [
"[email protected]"
] | |
41362c6f99bb8f283f580c623b5714413bdd9cef | b201a0a88022b4c567f9c74346d60ab17f46ef64 | /supervised_learning/0x08-deep_cnns/0-inception_block.py | c25e5a099742a0fae8f1a181e73471d332ce9741 | [] | no_license | Diegokernel/holbertonschool-machine_learning | 929d9b8ac0fcdecbf28b76c09799f86c4b48d38e | a51fbcb76dae9281ff34ace0fb762ef899b4c380 | refs/heads/master | 2020-12-21T18:34:03.579632 | 2020-10-15T20:51:32 | 2020-10-15T20:51:32 | 236,523,731 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | #!/usr/bin/env python3
"""inception"""
import tensorflow.keras as K
def inception_block(A_prev, filters):
"""Builds an inception block as described in Going Deeper with
Convolutions (2014).
The concatenated output of the inception block
"""
convly_1 = K.layers.Conv2D(filters=filters[0],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
convly_2P = K.layers.Conv2D(filters=filters[1],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
layer_pool = K.layers.MaxPooling2D(pool_size=(3, 3),
padding='same',
strides=(1, 1))(A_prev)
convly_3 = K.layers.Conv2D(filters=filters[2],
kernel_size=3,
padding='same',
kernel_initializer='he_normal',
activation='relu')(convly_2P)
convly_3P = K.layers.Conv2D(filters=filters[3],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
convly_3s = K.layers.Conv2D(filters=filters[4],
kernel_size=5,
padding='same',
kernel_initializer='he_normal',
activation='relu')(convly_3P)
OFPP = K.layers.Conv2D(filters=filters[5],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(layer_pool)
mid_layer = K.layers.Concatenate(axis=3)([convly_1,
convly_3, convly_3s, OFPP])
return mid_layer
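
# --- Usage sketch (added for illustration; not part of the original task file). ---
# The filter sizes below are an assumption (the classic GoogLeNet "3a" values); any
# six positive integers matching filters[0..5] work the same way.
if __name__ == "__main__":
    inputs = K.Input(shape=(224, 224, 3))
    outputs = inception_block(inputs, [64, 96, 128, 16, 32, 32])
    model = K.Model(inputs=inputs, outputs=outputs)
    model.summary()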
| [
"[email protected]"
] | |
7efd7bacbcbce83194fa14d887cdaec9746271a3 | 714e36b745a5b2b5fc4e9b267b3fa214a9fa3d9a | /scripts/matplotlib/32plot_multi_ax.py | 2d24a31b7ad2f8682a42094864e96bfd9abaa085 | [] | no_license | j3ffyang/ai | e89b4618c96e2085f37047c88d95f89d0a5409c9 | 5da753d2a1c9793564a32ac80911c1d2e35e8605 | refs/heads/master | 2022-12-10T21:12:48.432682 | 2020-08-12T07:56:11 | 2020-08-12T07:56:11 | 141,972,057 | 2 | 1 | null | 2022-11-22T02:55:29 | 2018-07-23T06:37:15 | Python | UTF-8 | Python | false | false | 251 | py | import matplotlib.pyplot as plt
fig= plt.figure(figsize=(10, 5))
ax1= fig.add_subplot(121) # 1= axes lie horizontally, 2= 2 cols
ax2= fig.add_subplot(122)
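# add_subplot(121)/(122): 1 row, 2 columns, slot 1 and slot 2, so the two axes
# sit side by side in the same figure.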
ax1.bar([1,2,3], [3,4,5])
ax2.barh([0.5, 1, 2.5], [0, 1, 2]) # horizontal bar
plt.show()
| [
"[email protected]"
] | |
f1e2664fa2d0bd72aa21b35789a9c70c94b02c4b | 29bd55d171733586f24f42151d44f4312b6a610e | /keras/keras09_R2_test_answer.py | 290e451c6c941e5b4cc6a1795f1120eefece9faa | [] | no_license | votus777/AI_study | 66ab1da2b8e760d0c52b0ed2b2f74158e14f435b | f4e38d95690c8ee84d87c02dc20a1ea59c495f04 | refs/heads/master | 2022-12-04T15:52:14.855624 | 2020-08-20T06:12:52 | 2020-08-20T06:12:52 | 262,975,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | '''
* Answer *
Make it overfit!
Run the epochs until just before the computer gives out.
But if you run it too much, the score comes out negative.
'''
# 1. Data
import numpy as np
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15])
y_test = np.array([11,12,13,14,15])
x_pred = np.array([16, 17, 18])
# 2. Build the model
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(5, input_dim = 1))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(1))
# 3. Train
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit(x_train, y_train, epochs=30000, batch_size = 1)
# 4. Evaluate and predict
loss,mse = model.evaluate(x_test, y_test, batch_size = 1)
print("loss : ", loss)
print("mse : ", mse)
'''
y_pred = model.predict(x_pred)
print("y_pred : ", y_pred)
'''
y_predict = model.predict(x_test)
print(y_predict)
#________Compute RMSE___________________
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_pred):
    return np.sqrt(mean_squared_error(y_test, y_predict))
# y_test = actual values, y_pred = predicted values
print("RMSE : ", RMSE(y_test, y_predict))
#________Compute R2_____________________
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print("R2 score : ", r2)
| [
"[email protected]"
] | |
637294feb6424a6229c798af7673ec45462eb36b | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /pychron/entry/providers/geodeepdive.py | 5b215e86d942b7a12705bd99e2a5055fe0130c7a | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 1,128 | py | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import requests
API_URL = "https://geodeepdive.org/api"
def get_snippet(term):
    s = requests.Session()
    url = "{}/snippets?term={}".format(API_URL, term)
    r = s.get(url)
    obj = r.json()
    return obj["success"]["data"]


if __name__ == "__main__":
    g = get_snippet("Fish Canyon")
    for o in g:
        print(o)
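# Note: as the unwrapping in get_snippet shows, the /snippets response nests its list
# of snippet records under a "success" -> "data" envelope; each `o` printed above is
# one of those records.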
# ============= EOF =============================================
| [
"[email protected]"
] | |
5bbd918ca6dd43ecbc614c38cc3504aae453f7ff | 3d4247362747e3763b72cd97ba39164387cfc07b | /tests/conftest.py | f2e6030f9a4269a460b002190ed04975970e3692 | [
"MIT"
] | permissive | gitter-badger/bocadillo | d5f104ff47ef5b9ee61163b7a4f3eb21d3e7da8b | ec1122ec6d62e8c90060b3ab3eb8cf7fb7deb433 | refs/heads/master | 2020-04-12T23:34:39.904514 | 2018-12-22T12:17:13 | 2018-12-22T12:17:13 | 162,823,961 | 0 | 0 | MIT | 2018-12-22T15:56:06 | 2018-12-22T15:56:06 | null | UTF-8 | Python | false | false | 1,147 | py | from typing import NamedTuple
import pytest
from click.testing import CliRunner
from bocadillo import API
from .utils import RouteBuilder
@pytest.fixture
def api():
    return API()


@pytest.fixture
def builder(api: API):
    return RouteBuilder(api)


class TemplateWrapper(NamedTuple):
    name: str
    context: dict
    rendered: str
    source_directory: str


def _create_template(api, tmpdir_factory, dirname):
    templates_dir = tmpdir_factory.mktemp(dirname)
    template_file = templates_dir.join("hello.html")
    template_file.write("<h1>Hello, {{ name }}!</h1>")
    api.templates_dir = str(templates_dir)
    return TemplateWrapper(
        name="hello.html",
        context={"name": "Bocadillo"},
        rendered="<h1>Hello, Bocadillo!</h1>",
        source_directory=dirname,
    )


@pytest.fixture
def template_file(api: API, tmpdir_factory):
    return _create_template(api, tmpdir_factory, dirname="templates")


@pytest.fixture
def template_file_elsewhere(api: API, tmpdir_factory):
    return _create_template(api, tmpdir_factory, dirname="templates_elsewhere")


@pytest.fixture
def runner():
    return CliRunner()
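# template_file and template_file_elsewhere share _create_template and differ only in
# the temp directory name, so tests can cover both the default template location and
# a relocated templates_dir.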
| [
"[email protected]"
] | |
81e4750f6e1eafec47a415e18716602934030d5a | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/ape/ApeControllerChassis.py | 92e616892a55b50b06f2dfb1df372998f463b50d | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | """This module contains the general information for ApeControllerChassis ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ApeControllerChassisConsts():
pass
class ApeControllerChassis(ManagedObject):
"""This is ApeControllerChassis class."""
consts = ApeControllerChassisConsts()
naming_props = set([u'index'])
mo_meta = MoMeta("ApeControllerChassis", "apeControllerChassis", "Chassis-[index]", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["read-only"], [u'apeControllerManager'], [u'apeControllerEeprom'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"index": MoPropertyMeta("index", "index", "uint", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"index": "index",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, index, **kwargs):
self._dirty_mask = 0
self.index = index
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "ApeControllerChassis", parent_mo_or_dn, **kwargs)
| [
"[email protected]"
] | |
8f304b7f865adf1b910f47d4f554e7f22e87c2c7 | b6406cd1e6d951934e762ec2ac925cddf4716ae9 | /verification_feed/app.py | f72a52e96e4339287768e573bf79e527800013db | [
"MIT"
] | permissive | agiamas/activity-stream | e5da92f00c26a8d7d99c4b2c5d4469e3500315cc | 2b6a23de082950736e71380932b89b0a0e984b89 | refs/heads/master | 2020-03-28T17:14:02.023352 | 2018-09-05T17:32:51 | 2018-09-05T17:32:51 | 148,770,350 | 0 | 0 | null | 2018-09-14T09:57:53 | 2018-09-14T09:57:53 | null | UTF-8 | Python | false | false | 3,912 | py | import asyncio
from datetime import (
datetime,
timedelta,
timezone,
)
import logging
import os
import sys
from aiohttp import web
LOGGER_NAME = 'activity-stream-verification-feed'
async def run_application():
    app_logger = logging.getLogger(LOGGER_NAME)

    app_logger.debug('Examining environment...')
    port = os.environ['PORT']
    app_logger.debug('Examining environment: done')

    await create_incoming_application(port)


async def create_incoming_application(port):
    app_logger = logging.getLogger(LOGGER_NAME)

    async def handle(request):
        timestamp = int(request.match_info['timestamp'])

        def get_next_page_href(next_timestamp):
            return str(request.url.with_scheme(request.headers.get(
                'X-Forwarded-Proto', 'http')).with_path(f'/{next_timestamp}'))

        return web.json_response(get_page(timestamp, get_next_page_href))

    app_logger.debug('Creating listening web application...')
    app = web.Application()
    app.add_routes([web.get(r'/{timestamp:\d+}', handle)])
    access_log_format = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i" %{X-Forwarded-For}i'
    runner = web.AppRunner(app, access_log_format=access_log_format)
    await runner.setup()
    site = web.TCPSite(runner, '0.0.0.0', port)
    await site.start()
    app_logger.debug('Creating listening web application: done')


def setup_logging():
    stdout_handler = logging.StreamHandler(sys.stdout)
    aiohttp_log = logging.getLogger('aiohttp.access')
    aiohttp_log.setLevel(logging.DEBUG)
    aiohttp_log.addHandler(stdout_handler)
    app_logger = logging.getLogger(LOGGER_NAME)
    app_logger.setLevel(logging.DEBUG)
    app_logger.addHandler(stdout_handler)


def get_page(timestamp, get_next_page_href):
    ''' Creates dummy activities where one has been created every second for the past 24 hours'''
    now = datetime.now(timezone.utc).replace(microsecond=0)
    one_day_ago = now - timedelta(hours=24)
    first_timestamp = int(one_day_ago.timestamp())
    final_timestamp = int(now.timestamp())
    max_per_page = 1000
    first_timestamp_of_page = max(first_timestamp, timestamp)
    final_timestamp_of_page = min(first_timestamp_of_page + max_per_page, final_timestamp)
    timestamps = range(first_timestamp_of_page, final_timestamp_of_page)
    return {
        '@context': [
            'https://www.w3.org/ns/ettystreams',
            {
                'dit': 'https://www.trade.gov.uk/ns/activitystreams/v1'
            }
        ],
        'orderedItems': [
            {
                'actor': {
                    'dit:activityStreamVerificationFeedOrganizationId': '1',
                    'type': [
                        'Organization',
                        'dit:activityStreamVerificationFeedOrganization'
                    ]
                },
                'dit:application': 'activityStreamVerificationFeed',
                'id': f'dit:activityStreamVerificationFeed:Verifier:{activity_id}:Create',
                'object': {
                    'id': f'dit:activityStreamVerificationFeed:Verifier:{activity_id}',
                    'type': [
                        'Document',
                        'dit:activityStreamVerificationFeed:Verifier'
                    ],
                    'url': f'https://activitystream.uktrade.io/activities/{activity_id}'
                },
                'published': datetime.utcfromtimestamp(timestamp).isoformat(),
                'type': 'Create'
            }
            for timestamp in timestamps
            for activity_id in [str(timestamp)]
        ],
        'type': 'Collection',
        **({'next': get_next_page_href(final_timestamp_of_page)} if timestamps else {}),
    }


def main():
    setup_logging()
    loop = asyncio.get_event_loop()
    loop.create_task(run_application())
    loop.run_forever()


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
b6e9b3b07c2c919a348d0916afd90fca07c31ad4 | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q1457_Pseudo_Palindromic_Paths_in_a_Binary_Tree.py | 3cb032d2740ae837af114fe21693659a88007536 | [] | no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | from typing import List
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def pseudoPalindromicPaths (self, root: Optional[TreeNode]) -> int:
        self.counter = Counter()
        self.count_palin = 0
        self.recursive(root)
        return self.count_palin

    def recursive(self,node):
        if node == None:
            return
        self.counter[node.val]+=1
        if node.left==node.right==None:
            self.check_palindrome()
        else:
            self.recursive(node.left)
            self.recursive(node.right)
        self.counter[node.val]+=1

    def check_palindrome(self):
        count_single = 0
        for val in self.counter.values():
            if val%2==1:
                count_single+=1
        if count_single>=2:
            return
        self.count_palin +=1
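    # A root-to-leaf path can be rearranged into a palindrome exactly when at most one
    # digit occurs an odd number of times, which is what check_palindrome tests.
    # The second `+= 1` on the way back up works because only parity matters here:
    # adding 1 again flips val % 2 back, just like subtracting the earlier increment.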
sol = Solution()
# input
[2,3,1,3,1,null,1]
[2,1,1,1,3,null,null,null,null,null,1]
[9]
# output
output = sol.pseudoPalindromicPaths(root)
# answer
answer = ""
print(output, answer, answer == output)
| [
"[email protected]"
] | |
ae65ef4938bb05f4be99939c1298eac1dfe34aed | 76f1331d083d360fb3822312537e72d4ff9d50b5 | /spider/strong_spider/spider/antispider/proxypool/proxy_crawler.py | 47c2dca816b1f993382664123b02647d33042a3d | [] | no_license | ZouJoshua/ml_project | 2fe0efee49aa1454b04cd83c61455232601720a6 | b1d8eb050182cd782bc6f3bb3ac1429fe22ab7b7 | refs/heads/master | 2021-07-22T10:37:56.452484 | 2020-05-09T09:54:39 | 2020-05-09T09:54:39 | 158,562,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,767 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2018/9/5 15:51
@File : proxy_crawler.py
@Desc : Proxy download scheduler
"""
from gevent import monkey
monkey.patch_all()
import sys
import time
import gevent
from gevent.pool import Pool
from multiprocessing import Queue, Process, Value
import setting
from spider.tools.utils import md5
from proxy_htmldownloader import HtmlDownloader
from proxy_htmlparser import HtmlParser
from proxy_validator import Validator
from proxy_sqlitedb import ProxySqliteDB
from proxy_pipeline import SqlitePipeline
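# Overall flow, as wired up in the __main__ block at the bottom of this file: one
# process crawls the configured parser sites and feeds candidate proxies into q1, a
# validator process checks q1 and pushes working proxies to q2, and a pipeline
# process persists q2 into SQLite, alongside a process running the HTTP API
# (start_api_server is referenced there but not imported above, so it presumably
# comes from elsewhere in the project).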
def start_proxycrawl(proxy_queue, db_proxy_num, myip):
    crawl = ProxyCrawl(proxy_queue, db_proxy_num, myip)
    crawl.run()


class ProxyCrawl(object):
    proxies = set()

    def __init__(self, proxy_queue, db_proxy_num, myip):
        self.crawl_pool = Pool(setting.THREADNUM)
        self.queue = proxy_queue
        self.db_proxy_num = db_proxy_num
        self.myip = myip

    def run(self):
        while True:
            self.proxies.clear()
            str_ = 'Starting crawl proxy!'
            sys.stdout.write(str_ + "\r\n")
            sys.stdout.flush()
            proxylist = ProxySqliteDB.get_all()
            spawns = []
            for proxy in proxylist:
                spawns.append(gevent.spawn(Validator.detect_from_db, self.myip, proxy, self.proxies))
                if len(spawns) >= setting.MAX_CHECK_CONCURRENT_PER_PROCESS:
                    gevent.joinall(spawns)
                    spawns= []
            gevent.joinall(spawns)
            self.db_proxy_num.value = len(self.proxies)
            str_ = 'IPProxyPool----->>>>>>>>db exists ip:%d' % len(self.proxies)
            if len(self.proxies) < setting.MINNUM:
                str_ += '\r\nIPProxyPool----->>>>>>>>now ip num < MINNUM, start crawling...'
                sys.stdout.write(str_ + "\r\n")
                sys.stdout.flush()
                spawns = []
                for p in setting.parserList:
                    spawns.append(gevent.spawn(self.crawl, p))
                    if len(spawns) >= setting.MAX_DOWNLOAD_CONCURRENT:
                        gevent.joinall(spawns)
                        spawns= []
                gevent.joinall(spawns)
            else:
                str_ += '\r\nIPProxyPool----->>>>>>>>now ip num meet the requirement,wait UPDATE_TIME...'
                sys.stdout.write(str_ + "\r\n")
                sys.stdout.flush()
            time.sleep(setting.UPDATE_TIME)

    def crawl(self, parser):
        html_parser = HtmlParser()
        for url in parser['urls']:
            response = HtmlDownloader.download(url)
            if response is not None:
                proxylist = html_parser.parse(response, parser)
                if proxylist is not None:
                    for proxy in proxylist:
                        proxy_str = '%s:%s' % (proxy['ip'], proxy['port'])
                        proxy['proxy_id'] = md5(proxy_str)
                        if proxy_str not in self.proxies:
                            self.proxies.add(proxy_str)
                            while True:
                                if self.queue.full():
                                    time.sleep(0.1)
                                else:
                                    self.queue.put(proxy)
                                    break


if __name__ == "__main__":
    DB_PROXY_NUM = Value('i', 0)
    q1 = Queue()
    q2 = Queue()
    p0 = Process(target=start_api_server)
    p1 = Process(target=start_proxycrawl, args=(q1, DB_PROXY_NUM))
    p2 = Process(target=Validator.validator, args=(q1, q2))
    p3 = Process(target=SqlitePipeline.save_data, args=(q2, DB_PROXY_NUM))
    p0.start()
    p1.start()
    p2.start()
    p3.start() | [
"[email protected]"
] | |
5b7240a4bf1fca1148ffca29d3a7222e1e4a4373 | b565143dbd490ad2721af7d8578483aa053383d0 | /recipes/tsl-hopscotch-map/all/conanfile.py | bd7f158542108161ebd2d641b6d256fd96a8682a | [
"MIT"
] | permissive | czoido/conan-center-index | da7fbe837c88e3a65f7f2d6ed24ada62eb601c69 | 7952190873e49e23996fc7192a76e5917c49ec8a | refs/heads/master | 2023-07-20T02:13:08.706521 | 2022-12-13T17:01:47 | 2022-12-13T17:01:47 | 228,573,093 | 0 | 0 | MIT | 2022-12-13T17:01:49 | 2019-12-17T08:49:29 | Python | UTF-8 | Python | false | false | 1,853 | py | from conans import ConanFile, tools
import os
required_conan_version = ">=1.43.0"
class TslHopscotchMapConan(ConanFile):
    name = "tsl-hopscotch-map"
    license = "MIT"
    description = "C++ implementation of a fast hash map and hash set using hopscotch hashing"
    topics = ("structure", "hash map", "hash set")
    homepage = "https://github.com/Tessil/hopscotch-map"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    no_copy_source = True

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    def package_id(self):
        self.info.header_only()

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def package(self):
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "tsl-hopscotch-map")
        self.cpp_info.set_property("cmake_target_name", "tsl::hopscotch_map")
        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "tsl-hopscotch-map"
        self.cpp_info.filenames["cmake_find_package_multi"] = "tsl-hopscotch-map"
        self.cpp_info.names["cmake_find_package"] = "tsl"
        self.cpp_info.names["cmake_find_package_multi"] = "tsl"
        self.cpp_info.components["hopscotch_map"].names["cmake_find_package"] = "hopscotch_map"
        self.cpp_info.components["hopscotch_map"].names["cmake_find_package_multi"] = "hopscotch_map"
        self.cpp_info.components["hopscotch_map"].set_property("cmake_target_name", "tsl::hopscotch_map")
| [
"[email protected]"
] | |
694f4c1a1be63ef2db87c8057b17478b668dafac | 7da87c6d4c4d8443f1a9930b5edc277ce2a6c358 | /009_triangles.py | 642b3ede7b4aa5ace5b0e7d418acbdaca8585824 | [] | no_license | kazamari/CodeAbbey | 2e1f28a20d5f773fc08b5b20899d437d5ba420f2 | 4f5031585ddad8d8be71ee1f80872712b139051e | refs/heads/master | 2021-04-15T14:49:10.839383 | 2018-03-23T12:18:17 | 2018-03-23T12:18:17 | 126,477,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | '''
Input data: First line will contain number of triplets.
Other lines will contain triplets themselves (each in separate line).
Answer: You should output 1 or 0 for each triplet (1 if triangle could be built and 0 otherwise).
Example:
data:
2
3 4 5
1 2 4
answer:
1 0
'''
from sys import stdin
def is_triangle(triplet):
a, b, c = sorted(triplet)
return int(c <= a + b)
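# A triangle can be built when the largest side is no greater than the sum of the
# other two (is_triangle sorts the sides and returns 1 or 0). The line below skips
# the first input line (the triplet count) and prints one result per triplet.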
print(*[is_triangle(map(int, line.rstrip().split())) for i, line in enumerate(stdin) if i > 0])
| [
"[email protected]"
] | |
15a64d10e6323cff866a56ab5326f9f9abfe8c10 | 97aa1181a8305fab0cfc635954c92880460ba189 | /torch/nn/intrinsic/modules/fused.py | 47f26dbdc203a870d09cbb6c19b5a68b2b260f95 | [
"BSD-2-Clause"
] | permissive | zhujiang73/pytorch_mingw | 64973a4ef29cc10b96e5d3f8d294ad2a721ccacb | b0134a0acc937f875b7c4b5f3cef6529711ad336 | refs/heads/master | 2022-11-05T12:10:59.045925 | 2020-08-22T12:10:32 | 2020-08-22T12:10:32 | 123,688,924 | 8 | 4 | NOASSERTION | 2022-10-17T12:30:52 | 2018-03-03T12:15:16 | C++ | UTF-8 | Python | false | false | 5,967 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
class ConvReLU1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv1d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU1d, self).__init__(conv, relu)
class ConvReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU2d, self).__init__(conv, relu)
class ConvReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU3d, self).__init__(conv, relu)
class LinearReLU(torch.nn.Sequential):
r"""This is a sequential container which calls the Linear and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, relu):
assert type(linear) == Linear and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(linear), type(relu))
super(LinearReLU, self).__init__(linear, relu)
class ConvBn1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv1d and type(bn) == BatchNorm1d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn1d, self).__init__(conv, bn)
class ConvBn2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn2d, self).__init__(conv, bn)
class ConvBnReLU1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv1d and type(bn) == BatchNorm1d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU1d, self).__init__(conv, bn, relu)
class ConvBnReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU2d, self).__init__(conv, bn, relu)
class ConvBn3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv3d and type(bn) == BatchNorm3d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn3d, self).__init__(conv, bn)
class ConvBnReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv3d and type(bn) == BatchNorm3d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU3d, self).__init__(conv, bn, relu)
class BNReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type(batch_norm) == BatchNorm2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(batch_norm), type(relu))
super(BNReLU2d, self).__init__(batch_norm, relu)
class BNReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type(batch_norm) == BatchNorm3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(batch_norm), type(relu))
super(BNReLU3d, self).__init__(batch_norm, relu)
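# Usage sketch (illustrative, not part of the original file): these containers wrap
# existing float modules before quantization, e.g.
#   fused_conv = ConvBnReLU2d(Conv2d(3, 16, 3), BatchNorm2d(16), ReLU())
#   fused_linear = LinearReLU(Linear(64, 32), ReLU())
# The argument types must match exactly, otherwise the asserts above fail.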
| [
"[email protected]"
] | |
7ca2f0338361b838e4f68540221bd77ee9e62925 | 3c349aa9cd58b50d9179bbc9d5f5c2403c491543 | /ex33_polygons.py | 035896acae30a0b26de698d7161f57a52b167776 | [] | no_license | dryabokon/geometry | a2f85f8681d5e878a327235380668ebdb858e70c | 9024e963f6a9f308101e6d477d89ce3323038117 | refs/heads/master | 2023-04-30T12:06:18.130607 | 2023-04-27T18:23:49 | 2023-04-27T18:23:49 | 156,690,211 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,872 | py | import cv2
import numpy
import shapely.geometry as geom
from scipy.spatial import ConvexHull
# ----------------------------------------------------------------------------------------------------------------------
import tools_polygons_i
import tools_draw_numpy
# ----------------------------------------------------------------------------------------------------------------------
N = 50
image = numpy.full((600, 800, 3), 255, dtype=numpy.uint8)
folder_out = './images/output/'
# ----------------------------------------------------------------------------------------------------------------------
def get_shape(N,center,radius):
x = center[0] + radius*numpy.array([numpy.sin(a/(N-1)*2*numpy.pi) for a in range(N)])
y = center[1] + radius*numpy.array([numpy.cos(a/(N-1)*2*numpy.pi) for a in range(N)])
x+= 0.9*radius*numpy.random.random(N)
y+= 0.9*radius*numpy.random.random(N)
points = numpy.concatenate((x.reshape((-1,1)),y.reshape((-1,1))),axis=1)
hull = ConvexHull(numpy.array(points))
cntrs = numpy.array(points)[hull.vertices]
points = numpy.array([(point[0], point[1]) for point in cntrs])
return points
# ----------------------------------------------------------------------------------------------------------------------
def interpolate(points1,points2,color1,color2):
p1s = geom.Polygon(points1)
p2s = geom.Polygon(points2)
I = tools_polygons_i.PolygonInterpolator(p1=p1s, p2=p2s)
X, Y, C0, C1, C2 = [], [], [], [], []
for pair in I.tuple_pairs:
X.append(numpy.linspace(pair[0][0], pair[1][0], N))
Y.append(numpy.linspace(pair[0][1], pair[1][1], N))
C0 = numpy.linspace(color1[0], color2[0], N).reshape((-1, 1))
C1 = numpy.linspace(color1[1], color2[1], N).reshape((-1, 1))
C2 = numpy.linspace(color1[2], color2[2], N).reshape((-1, 1))
X = numpy.array(X).T
Y = numpy.array(Y).T
C = numpy.concatenate([C0, C1, C2], axis=1).astype(numpy.uint8)
return X, Y, C
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
N1 = 13
N2 = 15
c1 = (400,300)
c2 = (430,220)
r1 = 100
r2 = 150
color1 = (0, 10, 255)
color2 = (255,128,15)
points1 = get_shape(N1,c1,r1)
points2 = get_shape(N2,c2,r2)
# cv2.imwrite(folder_out+'start.png',tools_draw_numpy.draw_contours(image, points1, color=color1,transperency=0.9))
# cv2.imwrite(folder_out+'stop.png' ,tools_draw_numpy.draw_contours(image, points2, color=color2,transperency=0.9))
X, Y, C = interpolate(points1, points2, color1, color2)
for i in range(X.shape[0]):
p = numpy.concatenate((X[i].reshape((-1,1)),Y[i].reshape((-1,1))),axis=1)
cv2.imwrite(folder_out+'%03d.png'%i,tools_draw_numpy.draw_contours(image, p, color=C[i],transperency=0.9))
| [
"[email protected]"
] | |
dda6f3a8ef102c9ba39babca4de4c8d9b3f4dd59 | 25f61fb72a60f95e10aff5809e67e95526c5fff7 | /leetcode-30day-challenge/May-2019/W1_1_first_bad_version.py | a7d316cfa6cd858db77346c1f616205b8c0c0f1e | [] | no_license | pradeepsinngh/A-Problem-A-Day | 19db6baccc68f216cd8206118dafb2cbec962671 | f5d598bbb60786a99c00fb338145c564fa70cf84 | refs/heads/master | 2021-07-18T02:23:04.248782 | 2020-05-04T22:12:27 | 2020-05-04T22:12:27 | 150,642,029 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | '''
# Prob: First Bad Version
# You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
'''
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution(object):
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
left = 1
right = n
while left < right:
            mid = (left + right) // 2
if not isBadVersion(mid):
left = mid+1
else:
right = mid
return left
| [
"[email protected]"
] | |
dcae80393763ae7d1d76bccf63b2810a8ededc78 | dfc686228834750216b2cd6eea14d2a6d12422e4 | /flask tutorials/DE_project_pharmacy_manage_Sys/run.py | 2063f0ac0c73dd73b978aa198fa5e68cfbbaefc0 | [] | no_license | Parth-Ps/python | 8466e8856bf301908544eb60ae4a68338ccf4550 | bb448c2a7996d17883214fe8eb11caa61e211400 | refs/heads/master | 2023-01-22T13:30:50.507021 | 2020-12-02T07:59:53 | 2020-12-02T07:59:53 | 317,788,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | from de_pharma import app
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
852c57d9837e0941432c42026f1c82d0451da187 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/contrib/auth/migrations/0008_alter_user_username_max_length.py | 7c9dae09500de428c3e2cea1c22b0419c38beedd | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 752 | py | from django.contrib.auth import validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(
error_messages={'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[validators.UnicodeUsernameValidator()],
verbose_name='username',
),
),
]
| [
"[email protected]"
] | |
3afca15c44b03004f1a13b16f2ce4c2a33cdf1b7 | d1c53def818f9c7e1bd660e3303a754f297aff43 | /code/ch7/4_13_b.py | ea53b0fad8491987b7b00a7fb0c995791a2c1aef | [] | no_license | khimacademy/c104 | dcdcae13499e5b68905f09ea009e1a2b9f552e1c | 83443858d5b85c23c107fa09cd672d17549776ee | refs/heads/master | 2020-03-26T10:57:52.536935 | 2018-08-25T06:17:04 | 2018-08-25T06:17:04 | 144,822,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | '''
4-13. Buffet
A buffet-style restaurant offers only five basic foods. Think of five simple foods and store them in a tuple.
- Use a for loop to print each food the restaurant offers.
- Try to modify one of the items and confirm that Python rejects the change.
- The restaurant is changing its menu: replace two of the items with different foods. Add a block of code that overwrites the tuple, then use a for loop to print each item of the revised menu.
Output:
You can choose from the following menu items:
- rockfish sandwich
- halibut nuggets
- smoked salmon chowder
- salmon burger
- crab cakes
Our menu has been updated.
You can now choose from the following items:
- rockfish sandwich
- halibut nuggets
- smoked salmon chowder
- black cod tips
- king crab legs
'''
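# Sketch solution (added for illustration): the menu items below are taken from the
# expected output in the docstring above.
foods = ('rockfish sandwich', 'halibut nuggets', 'smoked salmon chowder',
         'salmon burger', 'crab cakes')

print("You can choose from the following menu items:")
for food in foods:
    print("- " + food)

# Tuples are immutable, so item assignment raises a TypeError.
try:
    foods[3] = 'black cod tips'
except TypeError:
    pass  # Python rejects the change, as the exercise expects.

# Replacing two items means overwriting the whole tuple.
foods = ('rockfish sandwich', 'halibut nuggets', 'smoked salmon chowder',
         'black cod tips', 'king crab legs')

print("Our menu has been updated.")
print("You can now choose from the following items:")
for food in foods:
    print("- " + food)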
| [
"[email protected]"
] | |
69869ebc03984e9a181bfa01398bf27569c1070f | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /tools/perf/core/trybot_command.py | 42d3f8caba6a4ce90339060b8195a004ce92b4b3 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LGPL-2.0-only",
"MIT",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 23,224 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import base64
import collections
import gzip
import io
import json
import logging
import os
import platform
import subprocess
import tempfile
import urllib
import urllib2
from core import benchmark_finders
from core import path_util
from telemetry import benchmark
from telemetry import decorators
from telemetry.util import command_line
from telemetry.util import matching
ALL_CONFIG_BOTS = [
'all',
'all-win',
'all-mac',
'all-linux',
'all-android'
]
# Default try bots to use in case buildbot is unreachable.
DEFAULT_TRYBOTS = [
'linux_perf_bisect',
'mac_10_11_perf_bisect',
'winx64_10_perf_bisect',
'android_s5_perf_bisect',
]
CHROMIUM_SRC_PATH = path_util.GetChromiumSrcDir()
# Mapping of repo to its root path and git remote URL.
# Values for 'src' key in the map are related to path to the repo in the
# DEPS file, These values are to used create the DEPS patch along with patch
# that is being tried.
REPO_INFO_MAP = {
'src': {
'src': 'src',
'url': 'https://chromium.googlesource.com/chromium/src.git',
},
'v8': {
'src': 'src/v8',
'url': 'https://chromium.googlesource.com/v8/v8.git',
},
'skia': {
'src': 'src/third_party/skia',
'url': 'https://chromium.googlesource.com/skia.git',
},
'angle': {
'src': 'src/third_party/angle',
'url': 'https://chromium.googlesource.com/angle/angle.git',
},
'catapult': {
'src': 'src/third_party/catapult',
'url': ('https://chromium.googlesource.com/external/github.com/'
'catapult-project/catapult.git')
}
}
_MILO_MASTER_ENDPOINT = ('https://luci-milo.appspot.com/prpc/milo.Buildbot/'
'GetCompressedMasterJSON')
_MILO_RESPONSE_PREFIX = ')]}\'\n'
def _IsPerfBisectBot(builder):
return (
builder.endswith('_perf_bisect') and
# Bisect FYI bots are not meant for testing actual perf regressions.
# Hardware configuration on these bots is different from actual bisect bot
      # and these bots run E2E integration tests for auto-bisect
# using dummy benchmarks.
not builder.endswith('fyi_perf_bisect')
# Individual bisect bots may be blacklisted here.
)
assert all(_IsPerfBisectBot(builder) for builder in DEFAULT_TRYBOTS), (
'A default trybot is being exluded by the perf bisect bot filter.')
class TrybotError(Exception):
def __str__(self):
return '(ERROR) Perf Try Job: %s' % self.args[0]
def _ProcessMiloData(data):
if not data.startswith(_MILO_RESPONSE_PREFIX):
return None
data = data[len(_MILO_RESPONSE_PREFIX):]
try:
response_data = json.loads(data)
except Exception:
return None
try:
decoded_data = base64.b64decode(response_data.get('data'))
except Exception:
return None
try:
with io.BytesIO(decoded_data) as compressed_file:
with gzip.GzipFile(fileobj=compressed_file) as decompressed_file:
data_json = decompressed_file.read()
except Exception:
return None
return json.loads(data_json)
def _GetTrybotList(builders):
builders = ['%s' % bot.replace('_perf_bisect', '').replace('_', '-')
for bot in builders]
builders.extend(ALL_CONFIG_BOTS)
return sorted(builders)
def _GetBotPlatformFromTrybotName(trybot_name):
os_names = ['linux', 'android', 'mac', 'win']
try:
return next(b for b in os_names if b in trybot_name)
except StopIteration:
raise TrybotError('Trybot "%s" unsupported for tryjobs.' % trybot_name)
def _GetPlatformVariantFromBuilderName(builder):
bot_platform = _GetBotPlatformFromTrybotName(builder)
# Special case for platform variants that need special configs.
if bot_platform == 'win' and 'x64' in builder:
return 'win-x64'
elif bot_platform == 'android' and 'webview' in builder:
return 'android-webview'
else:
return bot_platform
def _GetBuilderNames(trybot_name, builders):
"""Return platform and its available bot name as dictionary."""
if trybot_name in ALL_CONFIG_BOTS:
platform_prefix = trybot_name[4:]
platform_and_bots = collections.defaultdict(list)
for builder in builders:
bot_platform = _GetPlatformVariantFromBuilderName(builder)
if bot_platform.startswith(platform_prefix):
platform_and_bots[bot_platform].append(builder)
return platform_and_bots
else:
builder = '%s_perf_bisect' % trybot_name.replace('-', '_')
bot_platform = _GetPlatformVariantFromBuilderName(builder)
return {bot_platform: [builder]}
_GIT_CMD = 'git'
if platform.system() == 'Windows':
# On windows, the git command is installed as 'git.bat'
_GIT_CMD = 'git.bat'
def RunGit(cmd, msg_on_error='', ignore_return_code=False):
"""Runs the git command with the given arguments.
Args:
cmd: git command arguments.
msg_on_error: Message to be displayed on git command error.
ignore_return_code: Ignores the return code for git command.
Returns:
The output of the git command as string.
Raises:
TrybotError: This exception is raised when git command fails.
"""
proc = subprocess.Popen(
[_GIT_CMD] + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = proc.communicate()
returncode = proc.poll()
if returncode:
if ignore_return_code:
return None
raise TrybotError('%s. \n%s \n%s' % (msg_on_error, err, output))
return output.strip()
class Trybot(command_line.ArgParseCommand):
"""Run telemetry perf benchmark on trybot."""
usage = 'botname benchmark_name [<benchmark run options>]'
_builders = None
def __init__(self):
self._builder_names = None
@classmethod
def _GetBuilderList(cls):
if not cls._builders:
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
values = {'name': 'tryserver.chromium.perf'}
data = urllib.urlencode(values)
req = urllib2.Request(_MILO_MASTER_ENDPOINT, None, headers)
f = urllib2.urlopen(req, json.dumps(values), timeout=10)
# In case of any kind of exception, allow tryjobs to use default trybots.
      # Possible exceptions are ssl.SSLError, urllib2.URLError,
# socket.timeout, socket.error.
except Exception: # pylint: disable=broad-except
        # In case of any exception, return the default trybots.
        print ('WARNING: Unable to reach buildbot to retrieve trybot '
'information, tryjob will use default trybots.')
cls._builders = DEFAULT_TRYBOTS
else:
data = _ProcessMiloData(f.read())
builders = data.get('builders', {}).keys()
# Exclude unsupported bots like win xp and some dummy bots.
cls._builders = [bot for bot in builders if _IsPerfBisectBot(bot)]
return cls._builders
def _InitializeBuilderNames(self, trybot):
self._builder_names = _GetBuilderNames(trybot, self._GetBuilderList())
@classmethod
def CreateParser(cls):
parser = argparse.ArgumentParser(
('Run telemetry benchmarks on trybot. You can add all the benchmark '
'options available except the --browser option'),
formatter_class=argparse.RawTextHelpFormatter)
return parser
@classmethod
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
del environment # unused
for arg in extra_args:
if arg == '--browser' or arg.startswith('--browser='):
parser.error('--browser=... is not allowed when running trybot.')
all_benchmarks = benchmark_finders.GetAllPerfBenchmarks()
all_benchmarks.extend(benchmark_finders.GetAllContribBenchmarks())
all_benchmark_names = [b.Name() for b in all_benchmarks]
all_benchmarks_by_names = {b.Name(): b for b in all_benchmarks}
benchmark_class = all_benchmarks_by_names.get(options.benchmark_name, None)
if not benchmark_class:
possible_benchmark_names = matching.GetMostLikelyMatchedObject(
all_benchmark_names, options.benchmark_name)
parser.error(
'No benchmark named "%s". Do you mean any of those benchmarks '
'below?\n%s' % (
options.benchmark_name, '\n'.join(possible_benchmark_names)))
is_benchmark_disabled, reason = cls.IsBenchmarkDisabledOnTrybotPlatform(
benchmark_class, options.trybot)
also_run_disabled_option = '--also-run-disabled-tests'
if is_benchmark_disabled and also_run_disabled_option not in extra_args:
parser.error('%s To run the benchmark on trybot anyway, add '
'%s option.' % (reason, also_run_disabled_option))
@classmethod
def IsBenchmarkDisabledOnTrybotPlatform(cls, benchmark_class, trybot_name):
"""Return whether benchmark will be disabled on trybot platform.
Note that we cannot tell with certainty whether the benchmark will be
disabled on the trybot platform since the disable logic in ShouldDisable()
can be very dynamic and can only be verified on the trybot server platform.
We are biased on the side of enabling the benchmark, and attempt to
early discover whether the benchmark will be disabled as our best.
It should never be the case that the benchmark will be enabled on the test
platform but this method returns True.
Returns:
A tuple (is_benchmark_disabled, reason) whereas |is_benchmark_disabled| is
a boolean that tells whether we are sure that the benchmark will be
disabled, and |reason| is a string that shows the reason why we think the
benchmark is disabled for sure.
"""
benchmark_name = benchmark_class.Name()
benchmark_disabled_strings = decorators.GetDisabledAttributes(
benchmark_class)
if 'all' in benchmark_disabled_strings:
return True, 'Benchmark %s is disabled on all platform.' % benchmark_name
if trybot_name == 'all':
return False, ''
trybot_platform = _GetBotPlatformFromTrybotName(trybot_name)
if trybot_platform in benchmark_disabled_strings:
return True, (
"Benchmark %s is disabled on %s, and trybot's platform is %s." %
(benchmark_name, ', '.join(benchmark_disabled_strings),
trybot_platform))
benchmark_enabled_strings = decorators.GetEnabledAttributes(benchmark_class)
if (benchmark_enabled_strings and
trybot_platform not in benchmark_enabled_strings and
'all' not in benchmark_enabled_strings):
return True, (
"Benchmark %s is only enabled on %s, and trybot's platform is %s." %
(benchmark_name, ', '.join(benchmark_enabled_strings),
trybot_platform))
if benchmark_class.ShouldDisable != benchmark.Benchmark.ShouldDisable:
logging.warning(
'Benchmark %s has ShouldDisable() method defined. If your trybot run '
'does not produce any results, it is possible that the benchmark '
'is disabled on the target trybot platform.', benchmark_name)
return False, ''
@classmethod
def AddCommandLineArgs(cls, parser, environment):
del environment # unused
available_bots = _GetTrybotList(cls._GetBuilderList())
parser.add_argument(
'trybot', choices=available_bots,
help=('specify which bots to run telemetry benchmarks on. '
' Allowed values are:\n' + '\n'.join(available_bots)),
metavar='<trybot name>')
parser.add_argument(
'benchmark_name', type=str,
help=('specify which benchmark to run. To see all available benchmarks,'
' run `run_benchmark list`'),
metavar='<benchmark name>')
parser.add_argument(
'--repo_path', type=str, default=CHROMIUM_SRC_PATH,
help=("""specify the repo path where the patch is created.'
This argument should only be used if the changes are made outside chromium repo.
E.g.,
1) Assume you are running run_benchmarks command from $HOME/cr/src/ directory:'
a) If your changes are in $HOME/cr/src/v8, then --repo_path=v8 or
--repo-path=$HOME/cr/src/v8
b) If your changes are in $HOME/cr/src/third_party/catapult, then
--repo_path=third_party/catapult or
--repo_path = $HOME/cr/src/third_party/catapult'
c) If your changes are not relative to src/ e.g. you created changes in some
other directory say $HOME/mydir/v8/v8/, then the
--repo_path=$HOME/mydir/v8/v8
2) Assume you are running run_benchmarks command not relative to src i.e.,
you are running from $HOME/mydir/ directory:'
a) If your changes are in $HOME/cr/src/v8, then --repo-path=$HOME/cr/src/v8
b) If your changes are in $HOME/cr/src/third_party/catapult, then
--repo_path=$HOME/cr/src/third_party/catapult'
c) If your changes are in $HOME/mydir/v8/v8/, then the
--repo_path=$HOME/mydir/v8/v8 or --repo_path=v8/v8"""),
metavar='<repo path>')
parser.add_argument(
'--deps_revision', type=str, default=None,
help=('specify DEPS revision to modify DEPS entry in Chromium to a '
'certain pushed revision.\n'
'This revision overrides value in DEPS on TOT Chromium for the '
'repo specified in --repo_path.\nIt is applied for both with and '
              'without patch.'),
metavar='<deps revision>')
def Run(self, options, extra_args=None):
"""Sends a tryjob to a perf trybot.
This creates a branch, telemetry-tryjob, switches to that branch, edits
the bisect config, commits it, uploads the CL, and runs a
tryjob on the given bot.
"""
if extra_args is None:
extra_args = []
self._InitializeBuilderNames(options.trybot)
return self._AttemptTryjob(options, extra_args)
def _GetPerfConfig(self, bot_platform, arguments):
"""Generates the perf config for try job.
Args:
bot_platform: Name of the platform to be generated.
arguments: Command line arguments.
Returns:
A dictionary with perf config parameters.
"""
# To make sure that we don't mutate the original args
arguments = arguments[:]
# Always set verbose logging for later debugging
if '-v' not in arguments and '--verbose' not in arguments:
arguments.append('--verbose')
# Generate the command line for the perf trybots
target_arch = 'ia32'
if any(arg == '--chrome-root' or arg.startswith('--chrome-root=') for arg
in arguments):
raise ValueError(
          'Trybot does not support --chrome-root option set directly '
'through command line since it may contain references to your local '
'directory')
arguments.insert(0, 'src/tools/perf/run_benchmark')
if bot_platform == 'android':
arguments.insert(1, '--browser=android-chromium')
elif bot_platform == 'android-webview':
arguments.insert(1, '--browser=android-webview')
elif bot_platform == 'win-x64':
arguments.insert(1, '--browser=release_x64')
target_arch = 'x64'
else:
arguments.insert(1, '--browser=release')
dummy_parser = argparse.ArgumentParser()
dummy_parser.add_argument('--output-format', action='append')
args, _ = dummy_parser.parse_known_args(arguments)
if not args.output_format or 'html' not in args.output_format:
arguments.append('--output-format=html')
command = ' '.join(arguments)
return {
'command': command,
'repeat_count': '1',
'max_time_minutes': '120',
'truncate_percent': '0',
'target_arch': target_arch,
}
def _GetRepoAndBranchName(self, repo_path):
"""Gets the repository name and working branch name.
Args:
repo_path: Path to the repository.
Returns:
Repository name and branch name as tuple.
Raises:
TrybotError: This exception is raised for the following cases:
1. Try job is for non-git repository or in invalid branch.
2. Un-committed changes in the current branch.
3. No local commits in the current branch.
"""
# If command runs successfully, then the output will be repo root path.
# and current branch name.
output = RunGit(['rev-parse', '--abbrev-ref', '--show-toplevel', 'HEAD'],
('%s is not a git repository, must be in a git repository '
'to send changes to trybots' % os.getcwd()))
repo_info = output.split()
# Assuming the base directory name is same as repo project name set in
# codereviews.settings file.
repo_name = os.path.basename(repo_info[0]).strip()
branch_name = repo_info[1].strip()
if branch_name == 'HEAD':
raise TrybotError('Not on a valid branch, looks like branch '
'is dettached. [branch:%s]' % branch_name)
# Check if the tree is dirty: make sure the index is up to date and then
# run diff-index
RunGit(['update-index', '--refresh', '-q'], ignore_return_code=True)
output = RunGit(['diff-index', 'HEAD'])
if output:
raise TrybotError(
'Cannot send a try job with a dirty tree.\nPlease commit locally and '
'upload your changes for review in %s repository.' % repo_path)
return (repo_name, branch_name)
def _GetBaseGitHashForRepo(self, branch_name, git_url):
"""Gets the base revision for the repo on which local changes are made.
Finds the upstream of the current branch that it is set to and gets
the HEAD revision from upstream. This also checks if the remote URL on
the upstream is supported by Perf Try job.
Args:
branch_name: Current working branch name.
git_url: Remote URL of the repo.
Returns:
Git hash of the HEAD revision from the upstream branch.
Raises:
TrybotError: This exception is raised when a GIT command fails or if the
remote URL of the repo found is not supported.
"""
# Check if there is any upstream branch associated with current working
# branch, Once the upstream branch is found i.e., then validates the
# remote URL and then returns the HEAD revision from the remote branch.
while not self._IsRepoSupported(branch_name, git_url):
branch_name = RunGit(
['rev-parse', '--abbrev-ref', '%s@{upstream}' % branch_name],
'Failed to get upstream branch name.')
return RunGit(
['rev-parse', '%s@{upstream}' % branch_name],
'Failed to get base revision hash on upstream.')
def _IsRepoSupported(self, current_branch, repo_git_url):
cur_remote = RunGit(
['config', 'branch.%s.remote'% current_branch],
'Failed to get branch.%s.remote from git config' % current_branch)
cur_remote = cur_remote.strip()
if cur_remote == '.':
return False
cur_remote_url = RunGit(
['config', 'remote.%s.url' % cur_remote],
'Failed to get remote.%s.url from git config' % cur_remote)
if cur_remote_url.lower() == repo_git_url:
return True
raise TrybotError('URL %s on remote %s is not recognized on branch.'% (
cur_remote_url, cur_remote))
def _GetChangeList(self):
"""Gets the codereview URL for the current changes."""
temp_file = None
json_output = None
try:
fd, temp_file = tempfile.mkstemp(suffix='.json', prefix='perf_try_cl')
os.close(fd)
RunGit(['cl', 'issue', '--json', temp_file],
'Failed to run "git cl issue" command.')
with open(temp_file, 'r') as f:
json_output = json.load(f)
finally:
try:
if temp_file:
os.remove(temp_file)
except OSError:
pass
if not json_output.get('issue'):
raise TrybotError(
'PLEASE NOTE: The workflow for Perf Try jobs is changed. '
'In order to run the perf try job, you must first upload your '
'changes for review.')
return json_output.get('issue_url')
def _AttemptTryjob(self, options, extra_args):
"""Attempts to run a tryjob from a repo directory.
Args:
options: Command line arguments to run benchmark.
extra_args: Extra arugments to run benchmark.
Returns:
If successful returns 0, otherwise 1.
"""
original_workdir = os.getcwd()
repo_path = os.path.abspath(options.repo_path)
try:
# Check the existence of repo path.
if not os.path.exists(repo_path):
raise TrybotError('Repository path "%s" does not exist, please check '
'the value of <repo_path> argument.' % repo_path)
# Change to the repo directory.
os.chdir(repo_path)
repo_name, branch_name = self._GetRepoAndBranchName(repo_path)
repo_info = REPO_INFO_MAP.get(repo_name, None)
if not repo_info:
raise TrybotError('Unsupported repository %s' % repo_name)
deps_override = None
if repo_name != 'src':
if not options.deps_revision:
options.deps_revision = self._GetBaseGitHashForRepo(
branch_name, repo_info.get('url'))
deps_override = {repo_info.get('src'): options.deps_revision}
review_url = self._GetChangeList()
print ('\nRunning try job....\nview progress here %s.'
'\n\tRepo Name: %s\n\tPath: %s\n\tBranch: %s' % (
review_url, repo_name, repo_path, branch_name))
for bot_platform in self._builder_names:
if not self._builder_names[bot_platform]:
logging.warning('No builder is found for %s', bot_platform)
continue
try:
arguments = [options.benchmark_name] + extra_args
self._RunTryJob(bot_platform, arguments, deps_override)
# Even if git cl try throws TrybotError exception for any platform,
# keep sending try jobs to other platforms.
except TrybotError, err:
print err
except TrybotError, error:
print error
return 1
finally:
# Restore to original working directory.
os.chdir(original_workdir)
return 0
def _RunTryJob(self, bot_platform, arguments, deps_override):
"""Executes perf try job with benchmark test properties.
Args:
bot_platform: Name of the platform to be generated.
arguments: Command line arguments.
deps_override: DEPS revision if needs to be overridden.
Raises:
TrybotError: When trybot fails to upload CL or run git try.
"""
config = self._GetPerfConfig(bot_platform, arguments)
# Generate git try command for available bots.
git_try_command = ['cl', 'try', '-m', 'tryserver.chromium.perf']
# Add Perf Test config to git try --properties arg.
git_try_command.extend(['-p', 'perf_try_config=%s' % json.dumps(config)])
error_msg_on_fail = 'Could not try CL for %s' % bot_platform
# Add deps overrides to git try --properties arg.
if deps_override:
git_try_command.extend([
'-p', 'deps_revision_overrides=%s' % json.dumps(deps_override)])
error_msg_on_fail += ' with DEPS override (%s)' % deps_override
for bot in self._builder_names[bot_platform]:
git_try_command.extend(['-b', bot])
RunGit(git_try_command, error_msg_on_fail)
print 'Perf Try job started for %s platform.' % bot_platform
| [
"[email protected]"
] | |
340cdd25e6ba61fafd87bd9b7fb641673e3470b8 | ad0b7af5c3547be1081e77594d98fa9939576c69 | /program/SGD.py | fecb93355de1de1e74268e73e1c5d474d0eba2db | [] | no_license | UESTC-Liuxin/ML | 29ce4a576215520f87049bf1024f659cbd7a0e64 | f88ddaa6eb97d1bb31a64ba3a8448fa9f2bead32 | refs/heads/master | 2022-07-29T22:24:12.997064 | 2020-05-24T12:48:32 | 2020-05-24T12:48:32 | 261,960,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | # import d2lzh as d2l
import math
# from mxnet import nd
import numpy as np
import matplotlib.pyplot as plt
def train_2d(trainer):  # this function is kept in the d2lzh package for later reuse
    x1, x2, s1, s2 = -5, -2, 0, 0  # s1 and s2 are state for the independent variables, used in later sections of this chapter
results = [(x1, x2)]
for i in range(20):
x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
results.append((x1, x2))
print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
return results
def show_trace_2d(f, results):  # this function is kept in the d2lzh package for later reuse
plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.02), np.arange(-3.0, 1.0, 0.02))
print(x1,x2)
plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
eta = 0.1
def f_2d(x1, x2):  # objective function
return x1 ** 2 + 2 * x2 ** 2
def gd_2d(x1, x2, s1, s2):
return (x1 - eta * 2 * x1, x2 - eta * 4 * x2, 0, 0)
def sgd_2d(x1, x2, s1, s2):
return (x1 - eta * (2 * x1 + np.random.normal(0.1)),
x2 - eta * (4 * x2 + np.random.normal(0.1)), s1, s2)
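# Note (added): gd_2d above is the noise-free gradient-descent update; swap it into
# the call below to compare the smooth GD trace with the noisy SGD trace, e.g.
#   show_trace_2d(f_2d, train_2d(gd_2d))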
show_trace_2d(f_2d, train_2d(sgd_2d)) | [
"[email protected]"
] | |
8d2bf3bac2b602bbaeb7eb68a7b28172a7b6631f | d1d79d0c3889316b298852834b346d4246825e66 | /blackbot/core/wss/ttp/art/art_T1055-1.py | ce733c638fed1dee79e232881503ae921bc201e5 | [] | no_license | ammasajan/Atomic-Red-Team-Intelligence-C2 | 78d1ed2de49af71d4c3c74db484e63c7e093809f | 5919804f0bdeb15ea724cd32a48f377bce208277 | refs/heads/master | 2023-07-17T12:48:15.249921 | 2021-08-21T20:10:30 | 2021-08-21T20:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'Privesc/T1055-1'
self.controller_type = ''
self.external_id = 'T1055'
self.blackbot_id = 'T1055-1'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {
'OutString': {
'Description' : 'Appends Out-String to the PowerShellCode',
'Required' : False,
'Value' : True,
},
'BypassLogging': {
'Description' : 'Bypasses ScriptBlock and Techniques logging',
'Required' : False,
'Value' : True,
},
'BypassAmsi': {
'Description' : 'Bypasses AMSI',
'Required' : False,
'Value' : True,
}
}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/powershell.boo'), 'r') as ttp_src:
src = ttp_src.read()
pwsh_script = get_path_in_package('core/wss/ttp/art/pwsh_ttp/privilegeEscalation/T1055-1')
with open(pwsh_script) as pwsh:
src = src.replace("POWERSHELL_SCRIPT", pwsh.read())
src = src.replace("OUT_STRING", str(self.options["OutString"]["Value"]).lower())
src = src.replace("BYPASS_LOGGING", str(self.options["BypassLogging"]["Value"]).lower())
src = src.replace("BYPASS_AMSI", str(self.options["BypassAmsi"]["Value"]).lower())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/pwsh_ttp/privilegeEscalation/T1055-1')
with open(path) as text:
head = [next(text) for l in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
| [
"[email protected]"
] | |
7062f8cb946b9bae96404c59fcbabca669313e2f | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GL/VERSION/GL_2_0.py | 6f786b758b73de315862fddade51267e6005a07b | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 14,306 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_2_0'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_2_0',error_checker=_errors._error_checker)
GL_ACTIVE_ATTRIBUTES=_C('GL_ACTIVE_ATTRIBUTES',0x8B89)
GL_ACTIVE_ATTRIBUTE_MAX_LENGTH=_C('GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',0x8B8A)
GL_ACTIVE_UNIFORMS=_C('GL_ACTIVE_UNIFORMS',0x8B86)
GL_ACTIVE_UNIFORM_MAX_LENGTH=_C('GL_ACTIVE_UNIFORM_MAX_LENGTH',0x8B87)
GL_ATTACHED_SHADERS=_C('GL_ATTACHED_SHADERS',0x8B85)
GL_BLEND_EQUATION_ALPHA=_C('GL_BLEND_EQUATION_ALPHA',0x883D)
GL_BLEND_EQUATION_RGB=_C('GL_BLEND_EQUATION_RGB',0x8009)
GL_BOOL=_C('GL_BOOL',0x8B56)
GL_BOOL_VEC2=_C('GL_BOOL_VEC2',0x8B57)
GL_BOOL_VEC3=_C('GL_BOOL_VEC3',0x8B58)
GL_BOOL_VEC4=_C('GL_BOOL_VEC4',0x8B59)
GL_COMPILE_STATUS=_C('GL_COMPILE_STATUS',0x8B81)
GL_COORD_REPLACE=_C('GL_COORD_REPLACE',0x8862)
GL_CURRENT_PROGRAM=_C('GL_CURRENT_PROGRAM',0x8B8D)
GL_CURRENT_VERTEX_ATTRIB=_C('GL_CURRENT_VERTEX_ATTRIB',0x8626)
GL_DELETE_STATUS=_C('GL_DELETE_STATUS',0x8B80)
GL_DRAW_BUFFER0=_C('GL_DRAW_BUFFER0',0x8825)
GL_DRAW_BUFFER1=_C('GL_DRAW_BUFFER1',0x8826)
GL_DRAW_BUFFER10=_C('GL_DRAW_BUFFER10',0x882F)
GL_DRAW_BUFFER11=_C('GL_DRAW_BUFFER11',0x8830)
GL_DRAW_BUFFER12=_C('GL_DRAW_BUFFER12',0x8831)
GL_DRAW_BUFFER13=_C('GL_DRAW_BUFFER13',0x8832)
GL_DRAW_BUFFER14=_C('GL_DRAW_BUFFER14',0x8833)
GL_DRAW_BUFFER15=_C('GL_DRAW_BUFFER15',0x8834)
GL_DRAW_BUFFER2=_C('GL_DRAW_BUFFER2',0x8827)
GL_DRAW_BUFFER3=_C('GL_DRAW_BUFFER3',0x8828)
GL_DRAW_BUFFER4=_C('GL_DRAW_BUFFER4',0x8829)
GL_DRAW_BUFFER5=_C('GL_DRAW_BUFFER5',0x882A)
GL_DRAW_BUFFER6=_C('GL_DRAW_BUFFER6',0x882B)
GL_DRAW_BUFFER7=_C('GL_DRAW_BUFFER7',0x882C)
GL_DRAW_BUFFER8=_C('GL_DRAW_BUFFER8',0x882D)
GL_DRAW_BUFFER9=_C('GL_DRAW_BUFFER9',0x882E)
GL_FLOAT_MAT2=_C('GL_FLOAT_MAT2',0x8B5A)
GL_FLOAT_MAT3=_C('GL_FLOAT_MAT3',0x8B5B)
GL_FLOAT_MAT4=_C('GL_FLOAT_MAT4',0x8B5C)
GL_FLOAT_VEC2=_C('GL_FLOAT_VEC2',0x8B50)
GL_FLOAT_VEC3=_C('GL_FLOAT_VEC3',0x8B51)
GL_FLOAT_VEC4=_C('GL_FLOAT_VEC4',0x8B52)
GL_FRAGMENT_SHADER=_C('GL_FRAGMENT_SHADER',0x8B30)
GL_FRAGMENT_SHADER_DERIVATIVE_HINT=_C('GL_FRAGMENT_SHADER_DERIVATIVE_HINT',0x8B8B)
GL_INFO_LOG_LENGTH=_C('GL_INFO_LOG_LENGTH',0x8B84)
GL_INT_VEC2=_C('GL_INT_VEC2',0x8B53)
GL_INT_VEC3=_C('GL_INT_VEC3',0x8B54)
GL_INT_VEC4=_C('GL_INT_VEC4',0x8B55)
GL_LINK_STATUS=_C('GL_LINK_STATUS',0x8B82)
GL_LOWER_LEFT=_C('GL_LOWER_LEFT',0x8CA1)
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS=_C('GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',0x8B4D)
GL_MAX_DRAW_BUFFERS=_C('GL_MAX_DRAW_BUFFERS',0x8824)
GL_MAX_FRAGMENT_UNIFORM_COMPONENTS=_C('GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',0x8B49)
GL_MAX_TEXTURE_COORDS=_C('GL_MAX_TEXTURE_COORDS',0x8871)
GL_MAX_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TEXTURE_IMAGE_UNITS',0x8872)
GL_MAX_VARYING_FLOATS=_C('GL_MAX_VARYING_FLOATS',0x8B4B)
GL_MAX_VERTEX_ATTRIBS=_C('GL_MAX_VERTEX_ATTRIBS',0x8869)
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS=_C('GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',0x8B4C)
GL_MAX_VERTEX_UNIFORM_COMPONENTS=_C('GL_MAX_VERTEX_UNIFORM_COMPONENTS',0x8B4A)
GL_POINT_SPRITE=_C('GL_POINT_SPRITE',0x8861)
GL_POINT_SPRITE_COORD_ORIGIN=_C('GL_POINT_SPRITE_COORD_ORIGIN',0x8CA0)
GL_SAMPLER_1D=_C('GL_SAMPLER_1D',0x8B5D)
GL_SAMPLER_1D_SHADOW=_C('GL_SAMPLER_1D_SHADOW',0x8B61)
GL_SAMPLER_2D=_C('GL_SAMPLER_2D',0x8B5E)
GL_SAMPLER_2D_SHADOW=_C('GL_SAMPLER_2D_SHADOW',0x8B62)
GL_SAMPLER_3D=_C('GL_SAMPLER_3D',0x8B5F)
GL_SAMPLER_CUBE=_C('GL_SAMPLER_CUBE',0x8B60)
GL_SHADER_SOURCE_LENGTH=_C('GL_SHADER_SOURCE_LENGTH',0x8B88)
GL_SHADER_TYPE=_C('GL_SHADER_TYPE',0x8B4F)
GL_SHADING_LANGUAGE_VERSION=_C('GL_SHADING_LANGUAGE_VERSION',0x8B8C)
GL_STENCIL_BACK_FAIL=_C('GL_STENCIL_BACK_FAIL',0x8801)
GL_STENCIL_BACK_FUNC=_C('GL_STENCIL_BACK_FUNC',0x8800)
GL_STENCIL_BACK_PASS_DEPTH_FAIL=_C('GL_STENCIL_BACK_PASS_DEPTH_FAIL',0x8802)
GL_STENCIL_BACK_PASS_DEPTH_PASS=_C('GL_STENCIL_BACK_PASS_DEPTH_PASS',0x8803)
GL_STENCIL_BACK_REF=_C('GL_STENCIL_BACK_REF',0x8CA3)
GL_STENCIL_BACK_VALUE_MASK=_C('GL_STENCIL_BACK_VALUE_MASK',0x8CA4)
GL_STENCIL_BACK_WRITEMASK=_C('GL_STENCIL_BACK_WRITEMASK',0x8CA5)
GL_UPPER_LEFT=_C('GL_UPPER_LEFT',0x8CA2)
GL_VALIDATE_STATUS=_C('GL_VALIDATE_STATUS',0x8B83)
GL_VERTEX_ATTRIB_ARRAY_ENABLED=_C('GL_VERTEX_ATTRIB_ARRAY_ENABLED',0x8622)
GL_VERTEX_ATTRIB_ARRAY_NORMALIZED=_C('GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',0x886A)
GL_VERTEX_ATTRIB_ARRAY_POINTER=_C('GL_VERTEX_ATTRIB_ARRAY_POINTER',0x8645)
GL_VERTEX_ATTRIB_ARRAY_SIZE=_C('GL_VERTEX_ATTRIB_ARRAY_SIZE',0x8623)
GL_VERTEX_ATTRIB_ARRAY_STRIDE=_C('GL_VERTEX_ATTRIB_ARRAY_STRIDE',0x8624)
GL_VERTEX_ATTRIB_ARRAY_TYPE=_C('GL_VERTEX_ATTRIB_ARRAY_TYPE',0x8625)
GL_VERTEX_PROGRAM_POINT_SIZE=_C('GL_VERTEX_PROGRAM_POINT_SIZE',0x8642)
GL_VERTEX_PROGRAM_TWO_SIDE=_C('GL_VERTEX_PROGRAM_TWO_SIDE',0x8643)
GL_VERTEX_SHADER=_C('GL_VERTEX_SHADER',0x8B31)
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glAttachShader(program,shader):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,arrays.GLcharArray)
def glBindAttribLocation(program,index,name):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum)
def glBlendEquationSeparate(modeRGB,modeAlpha):pass
@_f
@_p.types(None,_cs.GLuint)
def glCompileShader(shader):pass
@_f
@_p.types(_cs.GLuint,)
def glCreateProgram():pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum)
def glCreateShader(type):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteProgram(program):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteShader(shader):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glDetachShader(program,shader):pass
@_f
@_p.types(None,_cs.GLuint)
def glDisableVertexAttribArray(index):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDrawBuffers(n,bufs):pass
@_f
@_p.types(None,_cs.GLuint)
def glEnableVertexAttribArray(index):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLintArray,arrays.GLuintArray,arrays.GLcharArray)
def glGetActiveAttrib(program,index,bufSize,length,size,type,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLintArray,arrays.GLuintArray,arrays.GLcharArray)
def glGetActiveUniform(program,index,bufSize,length,size,type,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLuintArray)
def glGetAttachedShaders(program,maxCount,count,shaders):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,arrays.GLcharArray)
def glGetAttribLocation(program,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetProgramInfoLog(program,bufSize,length,infoLog):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetProgramiv(program,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetShaderInfoLog(shader,bufSize,length,infoLog):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetShaderSource(shader,bufSize,length,source):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetShaderiv(shader,pname,params):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,arrays.GLcharArray)
def glGetUniformLocation(program,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,arrays.GLfloatArray)
def glGetUniformfv(program,location,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,arrays.GLintArray)
def glGetUniformiv(program,location,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLvoidpArray)
def glGetVertexAttribPointerv(index,pname,pointer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLdoubleArray)
def glGetVertexAttribdv(index,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetVertexAttribfv(index,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetVertexAttribiv(index,pname,params):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsProgram(program):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsShader(shader):pass
@_f
@_p.types(None,_cs.GLuint)
def glLinkProgram(program):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,ctypes.POINTER( ctypes.POINTER( _cs.GLchar )),arrays.GLintArray)
def glShaderSource(shader,count,string,length):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLint,_cs.GLuint)
def glStencilFuncSeparate(face,func,ref,mask):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glStencilMaskSeparate(face,mask):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glStencilOpSeparate(face,sfail,dpfail,dppass):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLfloat)
def glUniform1f(location,v0):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glUniform1fv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint)
def glUniform1i(location,v0):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glUniform1iv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLfloat,_cs.GLfloat)
def glUniform2f(location,v0,v1):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glUniform2fv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint)
def glUniform2i(location,v0,v1):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glUniform2iv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glUniform3f(location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glUniform3fv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint)
def glUniform3i(location,v0,v1,v2):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glUniform3iv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glUniform4f(location,v0,v1,v2,v3):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glUniform4fv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint)
def glUniform4i(location,v0,v1,v2,v3):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glUniform4iv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix2fv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix3fv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLfloatArray)
def glUniformMatrix4fv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLuint)
def glUseProgram(program):pass
@_f
@_p.types(None,_cs.GLuint)
def glValidateProgram(program):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble)
def glVertexAttrib1d(index,x):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLdoubleArray)
def glVertexAttrib1dv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat)
def glVertexAttrib1f(index,x):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glVertexAttrib1fv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLshort)
def glVertexAttrib1s(index,x):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVertexAttrib1sv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble,_cs.GLdouble)
def glVertexAttrib2d(index,x,y):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLdoubleArray)
def glVertexAttrib2dv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat)
def glVertexAttrib2f(index,x,y):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glVertexAttrib2fv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLshort,_cs.GLshort)
def glVertexAttrib2s(index,x,y):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVertexAttrib2sv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glVertexAttrib3d(index,x,y,z):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLdoubleArray)
def glVertexAttrib3dv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glVertexAttrib3f(index,x,y,z):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glVertexAttrib3fv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glVertexAttrib3s(index,x,y,z):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVertexAttrib3sv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLbyteArray)
def glVertexAttrib4Nbv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glVertexAttrib4Niv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVertexAttrib4Nsv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte,_cs.GLubyte)
def glVertexAttrib4Nub(index,x,y,z,w):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLubyteArray)
def glVertexAttrib4Nubv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLuintArray)
def glVertexAttrib4Nuiv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLushortArray)
def glVertexAttrib4Nusv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLbyteArray)
def glVertexAttrib4bv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glVertexAttrib4d(index,x,y,z,w):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLdoubleArray)
def glVertexAttrib4dv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glVertexAttrib4f(index,x,y,z,w):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glVertexAttrib4fv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glVertexAttrib4iv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLshort,_cs.GLshort,_cs.GLshort,_cs.GLshort)
def glVertexAttrib4s(index,x,y,z,w):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVertexAttrib4sv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLubyteArray)
def glVertexAttrib4ubv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLuintArray)
def glVertexAttrib4uiv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLushortArray)
def glVertexAttrib4usv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLboolean,_cs.GLsizei,ctypes.c_void_p)
def glVertexAttribPointer(index,size,type,normalized,stride,pointer):pass
| [
"[email protected]"
] | |
55405e3d6382798737ab5eaecff2a1af521ff606 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/MuonSpectrometer/MuonCablings/MuonCablingServers/python/__init__.py | 46ac7323b0e0050a72f16f3a5a65d27ae41069ce | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# File: MuonCablingServersConfig/__init__.py
__version__ = '1.0.0'
__author__ = '[email protected]'
__all__ = [ 'MuonCablingServersConfig' ]
| [
"[email protected]"
] | |
51df6f78e9135e31af7333b8fb60f766d0b4e202 | 389569a591284a2adcdc38046114e7b1038afd94 | /polygon/polygon/main_test1.py | e577151b68aaee98d00b80a0414e0977b53d0787 | [] | no_license | xytysingle/AnnotationTool | b797daf2fd472f602341b16f24fb1ed9b702aef1 | a217d4376ceee739e0d8c43515c403133982e86e | refs/heads/master | 2020-04-11T18:16:10.438919 | 2019-07-31T10:21:18 | 2019-07-31T10:21:18 | 161,992,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,227 | py | # import re
#
# s = '1中文中文:123456aa哈哈哈bbcc'
# print(re.match(u"[\u4e00-\u9fa5]+", s) )
# # None. re.match only matches at the start of the string; it returns None when there is no match, otherwise a match object
#
# pat = '中文'
# print(re.search(pat, s).group())
# # match object. re.search scans the whole string; it returns None when there is no match, otherwise a match object
#
# newpat = '这里是中文内容'
# news = re.sub(pat, newpat, s) # regex substitution: every substring of s matching pat is replaced with newpat; newpat may also be a function
# print(news)
#
#
#
# def newpat_func(matched):
# return "这里是" + matched.group() + u"内容"
#
#
# print(re.sub(pat, newpat_func, s))
# for i,bbox in enumerate
#
# from tkinter import *
# import threading, time
# trace = 0
# class CanvasEventsDemo:
# def __init__(self, parent=None):
# canvas = Canvas(width=300, height=300, bg='beige')
# canvas.pack()
# canvas.bind('<ButtonPress-1>', self.onStart) # click
# canvas.bind('<B1-Motion>', self.onGrow) # and drag
# canvas.bind('<Double-1>', self.onClear) # delete all
# canvas.bind('<ButtonPress-3>', self.onMove) # move latest
# self.canvas = canvas
# self.drawn = None
# self.kinds = [canvas.create_oval, canvas.create_rectangle]
# def onStart(self, event):
# self.shape = self.kinds[0]
# self.kinds = self.kinds[1:] + self.kinds[:1] # start dragout
# self.start = event
# self.drawn = None
# def onGrow(self, event): # delete and redraw
# canvas = event.widget
# if self.drawn: canvas.delete(self.drawn)
# objectId = self.shape(self.start.x, self.start.y, event.x, event.y)
# if trace: print(objectId)
# self.drawn = objectId
# def onClear(self, event):
# event.widget.delete('all') # use tag all
# def onMove(self, event):
# if self.drawn: # move to click spot
# if trace: print(self.drawn)
# canvas = event.widget
# diffX, diffY = (event.x - self.start.x), (event.y - self.start.y)
# canvas.move(self.drawn, diffX, diffY)
# self.start = event
# class CanvasEventsDemoTags(CanvasEventsDemo):
# def __init__(self, parent=None):
# CanvasEventsDemo.__init__(self, parent)
# self.canvas.create_text(100, 8, text='Press o and r to move shapes')
# self.canvas.master.bind('<KeyPress-o>', self.onMoveOvals)
# self.canvas.master.bind('<KeyPress-r>', self.onMoveRectangles)
# self.kinds = self.create_oval_tagged, self.create_rectangle_tagged
# def create_oval_tagged(self, x1, y1, x2, y2):
# objectId = self.canvas.create_oval(x1, y1, x2, y2)
# self.canvas.itemconfig(objectId, tag='ovals', fill='blue')
# return objectId
# def create_rectangle_tagged(self, x1, y1, x2, y2):
# objectId = self.canvas.create_rectangle(x1, y1, x2, y2)
# self.canvas.itemconfig(objectId, tag='rectangles', fill='red')
# return objectId
# def onMoveOvals(self, event):
# print('moving ovals')
# self.moveInSquares(tag='ovals') # move all tagged ovals
# def onMoveRectangles(self, event):
# print('moving rectangles')
# self.moveInSquares(tag='rectangles')
# def moveInSquares(self, tag): # 5 reps of 4 times per sec
# for i in range(5):
# for (diffx, diffy) in [(+20, 0), (0, +20), (-20, 0), (0, -20)]:
# self.canvas.move(tag, diffx, diffy)
# self.canvas.update() # force screen redraw/update
# time.sleep(0.25) # pause, but don't block gui
# class CanvasEventsDemoThread(CanvasEventsDemoTags):
# def moveEm(self, tag):
# for i in range(5):
# for (diffx, diffy) in [(+20, 0), (0, +20), (-20, 0), (0, -20)]:
# self.canvas.move(tag, diffx, diffy)
# time.sleep(0.25) # pause this thread only
# def moveInSquares(self, tag):
#         threading.Thread(target=self.moveEm, args=(tag,)).start()
# if __name__ == '__main__':
# CanvasEventsDemoThread()
# mainloop()
#python tkinter menu
from tkinter import *
# some vocabulary to keep from getting confused. This terminology
# is something I cooked up for this file, but follows the man pages
# pretty closely
#
#
#
# This is a MENUBUTTON
# V
# +-------------+
# | |
#
# +------------++------------++------------+
# | || || |
# | File || Edit || Options | <-------- the MENUBAR
# | || || |
# +------------++------------++------------+
# | New... |
# | Open... |
# | Print |
# | | <------ This is a MENU. The lines of text in the menu are
# | | MENU ENTRIES
# | +---------------+
# | Open Files > | file1 |
# | | file2 |
# | | another file | <------ this cascading part is also a MENU
# +----------------| |
# | |
# | |
# | |
# +---------------+
__author__ = {'name' : 'Hongten',
'Email' : '[email protected]',
'Blog' : 'http://www.cnblogs.com/hongten',
'QQ' : '648719819',
'Created' : '2013-09-10'}
# _*_ coding:utf-8 _*_
# from tkinter import *
# tk = Tk()
# canvas = Canvas(width=500,height=500)
# canvas.pack()
#
#
# #canvas.create_polygon(0,0,250,250,fill = 'red')
#
# def echo_event(evt):
#     # print keyboard events
#     if evt.type == "2":
#         print("键盘:%s" % evt.keysym)
#     # print mouse events
#     if evt.type == "4":
#         print("鼠标: %s" % evt.num)
#     #
#     print(evt.type)
#
# # keyboard events
# # canvas.bind_all("<KeyPress>",echo_event)
# # To bind a specific key, either "<Key>" or "<KeyPress>" works; for a particular key append an underscore and the key name, e.g. bind the lowercase letter t and the Left key:
# canvas.bind("<KeyPress-t>",echo_event)
# canvas.bind_all("<KeyPress-Left>",echo_event)
# # mouse events
# canvas.bind_all("<Double-Button-1>",echo_event)
# canvas.bind_all("<Button-1>",echo_event)
# canvas.bind_all("<Button-2>",echo_event)
# canvas.bind_all("<Button-3>",echo_event)
# if __name__ == '__main__':
# mainloop()
# from tkinter import *
#
#
# def call_back(event):
# print(event.keysym)
#
#
# def main():
# root_login = Tk()
#
#     # create a frame; events are handled within this frame
# frame = Frame(root_login,
# width=200, height=200,
# background='green')
#
#     # This way there is no need to look up the keysym table for special keys.
#     # Just try it and see.
# frame.bind("<KeyPress>", call_back)
# frame.pack()
#
#     # The frame takes keyboard focus, so key events are delivered only to this frame
# frame.focus_set()
#
# mainloop()
#
#
# if __name__ == '__main__':
# main()
from tkinter import *
class make_list(Listbox):
def __init__(self,master, **kw):
self.canvas=Canvas(master,width=500,height=600,bg='green')
self.canvas.pack()
self.canvas.create_rectangle(0,50,100,100,dash=' ')
if __name__ == '__main__':
tk = Tk()
make_list(tk)
tk.mainloop()
| [
"[email protected]"
] | |
45baff1dc3b4200e7558995235b4bbe599e0dcc1 | ae76cc0d5432573bd263ad2c0539ac341a406719 | /SniffSpoof/scapy-examples/3.py | 66368a3d8799893f2832b375a98274e366b1ed26 | [] | no_license | Samyak2/SEEDLabs-Examples | aa250405c77fed5fda8f3a22dee3538efbf74dbf | 028ba10f99f7dbe1a50ccbdcbe1a45f6340b0898 | refs/heads/master | 2021-01-26T03:44:56.272708 | 2020-02-26T15:29:08 | 2020-02-26T15:29:08 | 243,296,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | #!/usr/bin/python3
from scapy.all import *
def print_pkt(pkt):
pkt.show()
pkt = sniff(filter='icmp', prn=print_pkt)
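# Usage note (sketch, not part of the original lab script): sniffing normally
# requires root privileges, and the BPF filter can be widened if needed, e.g.
#   pkts = sniff(filter='icmp or tcp port 23', prn=print_pkt, count=10)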
| [
"[email protected]"
] | |
8389f0e2b24980237a4f8e788106d6c081298b48 | 06957bcc77891d2a6cca5661625f9571918e47ae | /python/400.Nth Digit.py | 223b008c80d6116e012cac1ab8c0fc25cdfd95bf | [] | no_license | Leputa/Leetcode | c869af806dadff5c2c456a67ca61bf91fb54284f | 1f0a2a3150cee119bbe28f0061f467403ceddbee | refs/heads/master | 2021-06-03T11:47:56.992171 | 2020-07-01T09:22:23 | 2020-07-01T09:22:23 | 106,419,034 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | class Solution(object):
def findNthDigit(self, n):
"""
:type n: int
        :type n: int
        :rtype: int
        """
        # Find the block of numbers with the same digit length, then index in.
        length, base, count = 1, 1, 9
        while n > length * count:
            n -= length * count
            length, base, count = length + 1, base * 10, count * 10
        num = base + (n - 1) // length
        return int(str(num)[(n - 1) % length])
| [
"[email protected]"
] | |
3d0f4e92f82aeca0ee2486764345fa694bfe6656 | 8fd28b248511f42ad8732ca1e574aada33908376 | /configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py | 02c50db20d8fa02519d4fe1ad7364d0967dfce15 | [
"Apache-2.0"
] | permissive | vt-vl-lab/video-data-aug | 28bd175535cab1444055502389c8f5d7d75e4bd2 | 01667cdbd1b952f2510af3422beeeb76e0d9e15a | refs/heads/main | 2023-09-01T02:36:40.034893 | 2021-07-21T01:31:42 | 2021-07-21T01:31:42 | 352,920,339 | 29 | 6 | Apache-2.0 | 2021-07-21T01:29:36 | 2021-03-30T08:06:54 | Jupyter Notebook | UTF-8 | Python | false | false | 3,720 | py | model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
depth=50,
pretrained=None,
lateral=False,
conv1_kernel=(1, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
norm_eval=False),
cls_head=dict(
type='I3DHead',
in_channels=2048,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.1, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
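# Typical launch command (sketch; assumes the standard mmaction2-style layout
# with a tools/train.py entry point, which may differ in this repository):
#   python tools/train.py configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py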
| [
"[email protected]"
] | |
d07b9ec027e3387ad373a6fcb4dc243fa3964750 | 3f763cf893b09a3be562858613c928703ff349e4 | /client/verta/verta/_swagger/_public/modeldb/metadata/model/MetadataGetLabelsRequestResponse.py | 18ffa3ba5b5186c19c9a7ab2962468b46c703524 | [
"Apache-2.0"
] | permissive | VertaAI/modeldb | 636e46fc025b01a514d599b10e228c8735503357 | ec9ac7712500adb13fd815dfd476ce9f536c6921 | refs/heads/main | 2023-08-31T00:45:37.220628 | 2023-08-30T18:45:13 | 2023-08-30T18:45:13 | 71,305,435 | 844 | 142 | Apache-2.0 | 2023-09-14T19:24:13 | 2016-10-19T01:07:26 | Java | UTF-8 | Python | false | false | 566 | py | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class MetadataGetLabelsRequestResponse(BaseType):
def __init__(self, labels=None):
required = {
"labels": False,
}
self.labels = labels
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('labels', None)
if tmp is not None:
d['labels'] = [tmp for tmp in tmp]
return MetadataGetLabelsRequestResponse(**d)
| [
"[email protected]"
] | |
de67c007364dfb0b71dd50d487c78eea39e615d6 | cfbbe1303ed4a2feaf7e0023e62aa910b7eee733 | /doc/conf.py | a4b1bca880898a236fea88f11043c0d203a934b7 | [
"BSD-3-Clause"
] | permissive | 717524640/fatiando | 8fa4fef8920770735d1a0d655259e87bc9382001 | bf09661c40423bec85e47f15a14b786f25b7e873 | refs/heads/master | 2021-01-20T21:29:26.742259 | 2015-03-18T19:15:45 | 2015-03-18T19:15:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,807 | py | # -*- coding: utf-8 -*-
import sys
import os
import datetime
import sphinx_bootstrap_theme
# Sphinx needs to be able to import fatiando to use autodoc
sys.path.append(os.path.pardir)
# and the cookbook.py module to build the cookbook
sys.path.append(os.path.split(os.path.abspath(__file__))[0])
from fatiando import __version__, __commit__
import cookbook
# Build the cookbook recipes
cookbook.build(os.path.join(os.pardir, 'cookbook'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',
]
# Sphinx project configuration
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
master_doc = 'index'
# General information about the project
year = datetime.date.today().year
project = u'Fatiando a Terra'
copyright = u'2010-{:d}, Leonardo Uieda'.format(year)
if len(__version__.split('-')) > 1:
version = '-'.join([__version__.split('-')[0], 'dev'])
else:
version = __version__
# I'll use the release to place the commit hash at the footer of the site
release = __commit__.split('-')[0] # Get rid of -dirty
doi = '10.6084/m9.figshare.1115194'
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |doi| replace:: {doi}
.. |doilink| replace:: doi:`{doi} <http://dx.doi.org/{doi}>`__
.. |year| replace:: {year}
""".format(doi=doi, year=year)
html_last_updated_fmt = '%b %d, %Y'
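# With the rst_epilog above appended to every source file, any .rst page can
# write, for example:  "Please cite Fatiando (|doilink|), accessed |year|."
# and Sphinx expands |doi|, |doilink| and |year| automatically.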
html_title = 'Fatiando a Terra'
html_short_title = 'Fatiando a Terra'
html_logo = '_static/fatiando-logo-noborder.png'
html_favicon = u'favicon.ico'
html_static_path = ['_static']
html_extra_path = ['.nojekyll', 'CNAME']
html_use_smartypants = True
pygments_style = 'sphinx'
add_function_parentheses = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FatiandoATerraDoc'
# Theme config
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootswatch_theme': "flatly",
'navbar_title': 'fatiando',
'navbar_site_name': "Site",
'navbar_links': [
("Cite us", "cite"),
("Install", "install"),
("Docs", "docs"),
('<i class="fa fa-github-square fa-lg" title="Source code on Github"></i>',
"https://github.com/fatiando/fatiando", True),
('<i class="fa fa-envelope fa-lg" title="Mailing list"></i>',
"https://groups.google.com/d/forum/fatiando", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-default",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
'bootstrap_version': "3",
}
| [
"[email protected]"
] | |
7d1953fb1da45033d63049d7b07cc49f0cbb273e | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/e93cdecef17fe3027247389bba5934607c63372e-<run>-fix.py | 358973030efe43d7b9bb143ecbcf224ff7f0da99 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | def run(self, tmp=None, task_vars=None):
if (self._play_context.connection != 'local'):
return dict(failed=True, msg=('invalid connection specified, expected connection=local, got %s' % self._play_context.connection))
module = module_loader._load_module_source(self._task.action, module_loader.find_plugin(self._task.action))
if (not getattr(module, 'USE_PERSISTENT_CONNECTION', False)):
return super(ActionModule, self).run(tmp, task_vars)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.network_os = 'junos'
pc.remote_addr = (provider['host'] or self._play_context.remote_addr)
if (self._task.action == 'junos_netconf'):
pc.connection = 'network_cli'
pc.port = (provider['port'] or self._play_context.port or 22)
else:
pc.connection = 'netconf'
pc.port = (provider['port'] or self._play_context.port or 830)
pc.remote_user = (provider['username'] or self._play_context.connection_user)
pc.password = (provider['password'] or self._play_context.password)
pc.private_key_file = (provider['ssh_keyfile'] or self._play_context.private_key_file)
pc.timeout = (provider['timeout'] or self._play_context.timeout)
display.vvv(('using connection plugin %s' % pc.connection), pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = self._get_socket_path(pc)
display.vvvv(('socket_path: %s' % socket_path), pc.remote_addr)
if (not os.path.exists(socket_path)):
if (pc.connection == 'netconf'):
(rc, out, err) = connection.exec_command('open_session()')
else:
(rc, out, err) = connection.exec_command('open_shell()')
if (rc != 0):
return {
'failed': True,
'msg': 'unable to connect to control socket',
}
elif (pc.connection == 'network_cli'):
(rc, out, err) = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
(rc, out, err) = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
return super(ActionModule, self).run(tmp, task_vars) | [
"[email protected]"
] | |
408df962b97cfeeb22d6bd8877223f600cbcaf2e | 3ee982b28adec7154777a9962dacae5c17fbebe0 | /data3/hadoop-2/PageRank1/PageRank.py | 86f0a067ab8c03d973c7f59125f0f7d6d44aa98d | [] | no_license | nycdatasci/aetna | 406a5194b0ffff6b78c2ce1d34c2b090c3d82840 | 095c476210ebe4fef0a702a6a0a56981fe91c8ff | refs/heads/master | 2020-04-24T05:40:48.464871 | 2019-03-15T15:06:42 | 2019-03-15T15:06:42 | 171,737,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | #!/usr/bin/env python
import os
from sys import argv
script, input_file, iters = argv
iters = int(iters)
streaming = '''hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
-files PageRankMap.py,PageRankReduce.py \
-input %s \
-output PageRankOutput \
-mapper PageRankMap.py \
-reducer PageRankReduce.py
''' % input_file
get_pop = 'hadoop fs -get PageRankOutput/part-00000'
rm_output = 'hadoop fs -rm -R PageRankOutput'
update_pop_local = 'mv part-00000 pop_table'
rm_pop_table = 'hadoop fs -rm pop_table'
update_pop_hdfs = 'hadoop fs -put pop_table'
os.system("hadoop fs -put %s" % input_file)
for i in range(iters):
os.system(streaming)
os.system(get_pop)
os.system(rm_output)
os.system(update_pop_local)
os.system(rm_pop_table)
os.system(update_pop_hdfs)
print("%d th iteration:" % (i+1))
os.system("cat pop_table")
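# Typical invocation (sketch; the input file name and iteration count are
# illustrative):
#   python PageRank.py links.txt 10
# Each pass runs one Hadoop-streaming MapReduce job, pulls the new rank table
# back from HDFS, and republishes it as pop_table for the next iteration.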
| [
"[email protected]"
] | |
7c79dc9fc7a73da8dbaa46e617aa02e1400a73a7 | ae8f61a8c0c4a569f00529c3f07c73dbfc884f71 | /tiled/server/models.py | f46b8b543646104f2e61cef8595a3bcd5815ac21 | [
"BSD-3-Clause"
] | permissive | untzag/tiled | 1ba705303193312711d8ac75b977a26d6d9e7571 | 43a8ba82660ce3be077f2b6b060bdd2a23cf956b | refs/heads/main | 2023-04-18T18:34:13.545139 | 2021-04-28T21:27:59 | 2021-04-28T21:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | import enum
import pydantic
import pydantic.dataclasses
import pydantic.generics
from typing import Any, Dict, Generic, List, Optional, TypeVar
DataT = TypeVar("DataT")
class Error(pydantic.BaseModel):
code: int
message: str
class Response(pydantic.generics.GenericModel, Generic[DataT]):
data: Optional[DataT]
error: Optional[Error]
meta: Optional[dict]
links: Optional[dict]
@pydantic.validator("error", always=True)
def check_consistency(cls, v, values):
if v is not None and values["data"] is not None:
raise ValueError("must not provide both data and error")
if v is None and values.get("data") is None:
raise ValueError("must provide data or error")
return v
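# Example (sketch): the validator above makes `data` and `error` mutually
# exclusive, so for some payload model Foo:
#   Response[Foo](data=Foo(...))                          # valid
#   Response[Foo](error=Error(code=404, message="gone"))  # valid
#   Response[Foo]()                                       # raises ValidationError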
class EntryType(str, enum.Enum):
catalog = "catalog"
reader = "reader"
class EntryFields(str, enum.Enum):
metadata = "metadata"
structure_family = "structure_family"
microstructure = "structure.micro"
macrostructure = "structure.macro"
count = "count"
client_type_hint = "client_type_hint"
none = ""
class CatalogAttributes(pydantic.BaseModel):
metadata: Optional[dict] # free-form, user-specified dict
count: Optional[int]
client_type_hint: Optional[str]
class StructureFamilies(str, enum.Enum):
array = "array"
dataframe = "dataframe"
variable = "variable"
data_array = "data_array"
dataset = "dataset"
class ReaderAttributes(pydantic.BaseModel):
metadata: Optional[dict] # free-form, user-specified dict
structure_family: Optional[StructureFamilies]
structure: Optional[Any] # TODO Figure out how to deal with dataclasses in FastAPI
class Resource(pydantic.BaseModel):
"A JSON API Resource"
id: str
type: EntryType
meta: Optional[dict]
class CatalogResource(Resource):
"Representation of a Catalog as a JSON API Resource"
attributes: CatalogAttributes
class ReaderResource(Resource):
"Representation of a Reader as a JSON API Resource"
attributes: ReaderAttributes
class Token(pydantic.BaseModel):
access_token: str
token_type: str
class TokenData(pydantic.BaseModel):
username: Optional[str] = None
class About(pydantic.BaseModel):
api_version: int
library_version: str
formats: Dict[str, List[str]]
aliases: Dict[str, Dict[str, List[str]]]
queries: List[str]
| [
"[email protected]"
] | |
e99dd85e1f058f54beede6d60e8a291aa7cccccd | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/processing/vcs_data/vcs_manager.py | 56048e37f366b487a7135bf49ed42f78aeb4bc64 | [] | no_license | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,622 | py | # #===============================================================================
# # Copyright 2013 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# #===============================================================================
#
# #============= enthought library imports =======================
# from itertools import groupby
# import os
# import subprocess
# from git.exc import GitCommandError
# import paramiko
# from traits.api import Instance, Str
# #============= standard library imports ========================
# #============= local library imports ==========================
# from uncertainties import std_dev, nominal_value
# import yaml
#
# from pychron.loggable import Loggable
# from pychron.paths import r_mkdir, paths
# from pychron.processing.vcs_data.diff import Diff
# from pychron.processing.vcs_data.repo_manager import RepoManager
#
#
# class VCSManager(Loggable):
# """
# manage access to data sourced in git repos
# create local and remote repositories
# """
# #root location of all repositories
# root = Str
# remote_template=Str('file:///Users/ross/Sandbox/git/{}.git')
#
# class IsotopeVCSManager(VCSManager):
# """
# add analyses to local repository
# create a line-oriented file for each analysis
# repo organization
# -project
# -.git
# -sample
# -labnumber
# -<record_id>.yaml
#
# track changes
# push changes to remote repo
#
# """
# repo_manager = Instance(RepoManager, ())
#
# def is_dirty(self):
# rm=self.repo_manager
# return rm.is_dirty()
#
# def get_untracked(self):
# rm=self.repo_manager
# return rm.get_untracked()
#
# def get_diffs(self):
# rm = self.repo_manager
#
# ds=[]
#
# diffs, patches=rm.get_local_changes()
# for di, p in zip(diffs, patches):
# ds.append(Diff(name=os.path.basename(di.a_blob.path),
# path=di.a_blob.path,
# patch=p,
# use=True))
#
# return ds
#
# def set_repo(self, name):
# name=name.replace(' ','_')
# p = os.path.join(paths.vcs_dir, name)
#
# #make or use existing repo
# self.init_repo(p)
#
# #add readme if none exists
# self.add_readme(p)
#
# def add_readme(self, p):
# p = os.path.join(p, 'README')
# if not os.path.isfile(p):
# with open(p, 'w') as fp:
# fp.write('README for PROJECT <{}>\n\n\n'
# '**file created by Pychron\'s VCSManager'.format(os.path.basename(p)))
#
# self.repo_manager.add(p, msg='init commit')
#
# def init_repo(self, path):
# """
# return if repositories already existed
# """
# rm = self.repo_manager
# return rm.add_repo(path)
#
# def create_remote(self, *args, **kw):
# """
# add remote url alias
# """
# rm=self.repo_manager
# rm.create_remote(*args,**kw)
#
# # def remote_repo_exists(self, path, host='localhost'):
# # if host == 'localhost':
# # return os.path.isdir(path)
# # else:
# # client = paramiko.SSHClient()
# # # client.connect(host, username=user, password=pwd)
# # stdin, stdout, stderr = client.exec_command('cd {}'.format(path))
# # return not 'No such file or directory' in stdout.readall()
#
# def create_remote_repo(self, name, host='localhost'):
# """
# create a bare repo on the server
# """
# path=self.remote_template.format(name)[7:]
# print path, host
# if host=='localhost':
#
# if not os.path.isdir(path):
# os.mkdir(path)
# subprocess.call(['git','--bare', 'init',path])
# else:
#
# client = paramiko.SSHClient()
# # client.connect(host, username=user, password=pwd)
#
# stdin, stdout, stderr=client.exec_command('mkdir {}'.format(path))
# if not 'File exists' in stdout.readall():
# client.exec_command('git --bare init {}'.format(path))
#
# def add(self, p, **kw):
# rm = self.repo_manager
# rm.add(p, **kw)
#
# def push(self, **kw):
# self.debug('pushing')
# rm=self.repo_manager
# rm.push(**kw)
#
# def pull(self, **kw):
# rm = self.repo_manager
# rm.pull(**kw)
#
# def commit(self, msg):
# rm = self.repo_manager
# rm.commit(msg)
#
# #Isotope protocol
# def clone_project_repos(self, rs):
# for ri in rs:
# ri=ri.replace(' ','_')
# p=os.path.join(paths.vcs_dir, ri)
# if not self.init_repo(p):
# self.debug('Cloning repository {}'.format(ri))
#
# url=self.remote_template.format(ri)
# self.create_remote(url)
#
# self.add_readme(p)
# try:
# self.pull(handled=False)
# except GitCommandError:
# p=os.path.basename(p)
# self.create_remote_repo(p)
#
# self.push()
#
# self.pull()
#
# def update_analyses(self, ans, msg):
# for proj, ais in self._groupby_project(ans):
#
# self.set_repo(proj)
#
# ais=list(ais)
# for ai in ais:
# self._update_analysis(ai)
#
# s=ans[0]
# e=ans[-1]
# self.commit('{} project={} {}({}) - {}({})'.format(msg, proj, s.record_id, s.sample, e.record_id, e.sample))
#
# def update_analysis(self, an, msg):
# self._update_analysis(an)
# self.commit(msg)
#
# def _update_analysis(self,an):
# root = self.repo_manager.root
# p = os.path.join(root, an.sample, an.labnumber)
# p = os.path.join(p, '{}.yaml'.format(an.record_id))
# d = self._generate_analysis_dict(an)
# with open(p, 'w') as fp:
# yaml.dump(d, fp, indent=4, default_flow_style=False)
#
# self.repo_manager.add(p, commit=False)
#
# def _groupby_project(self, ans):
# key = lambda x: x.project
# ans = sorted(ans, key=key)
# return groupby(ans, key=key)
#
# def add_analyses(self, ans, **kw):
# for proj, ais in self._groupby_project(ans):
# self.set_repo(proj)
# ais=list(ais)
# added=any([self._add_analysis(ai, commit=False, **kw) for ai in ais])
# if added:
# s=ais[0]
# e=ais[-1]
# self.repo_manager.commit('added analyses {}({}) to {}({}) to project= {}'.format(s.record_id, s.sample,
# e.record_id, e.sample,
# proj))
#
# def add_analysis(self, an, set_repo=True, **kw):
# if set_repo:
# self.set_repo(an.project)
# self._add_analysis(an, **kw)
#
# def _add_analysis(self, an, commit=True, progress=None):
# root = os.path.join(self.repo_manager.root, an.sample, an.labnumber)
# p = os.path.join(root, '{}.yaml'.format(an.record_id))
# if not os.path.isfile(p):
# if progress:
# progress.change_message('Adding vcs analysis {}'.format(an.record_id))
# #make necessary file structure
# r_mkdir(root)
#
# d = self._generate_analysis_dict(an)
# with open(p, 'w') as fp:
# yaml.dump(d, fp, indent=4, default_flow_style=False)
#
# self.repo_manager.add(p, commit=commit)
# return True
#
# #private
# def _generate_analysis_dict(self, ai):
# """
# convert types to float,int,dict,list, etc
# """
# d = dict([(k, getattr(ai, k)) for k in ('labnumber', 'aliquot',
# 'step', 'timestamp', 'tag',
# 'sample','project','material','mass_spectrometer')])
#
# def func(iso):
# return {'name': iso.name,
# 'detector': iso.detector,
# 'discrimination': float(iso.discrimination.nominal_value),
# 'discrimination_err': float(iso.discrimination.std_dev),
# 'ic_factor': float(iso.ic_factor.nominal_value),
# 'ic_factor_err': float(iso.ic_factor.std_dev),
# 'value':float(iso.value),
# 'error':float(iso.error),
# 'blank': float(iso.blank.value),
# 'blank_err': float(iso.blank.error),
# 'baseline': float(iso.baseline.value),
# 'baseline_err': float(iso.baseline.error),
# 'fit':iso.fit,
# 'filter_outliers':dict(iso.filter_outliers_dict),
# 'data':iso.pack()
# }
#
# isos = [func(ii) for ii in ai.isotopes.itervalues()]
# d['isotopes'] = isos
#
# d['j']=float(ai.j.nominal_value)
# d['j_err']=float(ai.j.std_dev)
#
# d['constants']=ai.arar_constants.to_dict()
# d['production_ratios']=dict(ai.production_ratios)
#
# ifc=ai.interference_corrections
# nifc=dict()
# for k,v in ifc.iteritems():
# nifc[k]=nominal_value(v)
# nifc['{}_err'.format(k)]=float(std_dev(v))
#
# d['interference_corrections']=nifc
# d['chron_segments']=[dict(zip(('power','duration','dt'), ci)) for ci in ai.chron_segments]
# d['irradiation_time']=ai.irradiation_time
#
# return d
#
# #============= EOF =============================================
#
| [
"[email protected]"
] | |
51cbf596ebc778a532ab71e8d4bc5c3334d54fc2 | 6fe477a55bd565c78b55e5ec79ae704186e2c1fc | /chatgui.py | aceede2560445b3cbe0b67e7c8c2db77c079ae00 | [] | no_license | jayz6/chatboty | 4f7ef8ea0105394230b076261f0f5a4828abfcde | e419faac730c9644269b6093e35bc375e2f723b8 | refs/heads/master | 2023-06-15T14:45:36.542008 | 2021-07-11T12:27:29 | 2021-07-11T12:27:29 | 374,563,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,646 | py | import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
def clean_up_sentence(sentence):
# tokenize the pattern - split words into array
sentence_words = nltk.word_tokenize(sentence)
# stem each word - create short form for word
sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
# tokenize the pattern
sentence_words = clean_up_sentence(sentence)
# bag of words - matrix of N words, vocabulary matrix
bag = [0]*len(words)
for s in sentence_words:
for i,w in enumerate(words):
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
print ("found in bag: %s" % w)
return(np.array(bag))
def predict_class(sentence, model):
# filter out predictions below a threshold
p = bow(sentence, words,show_details=False)
res = model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.25
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
return return_list
def getResponse(ints, intents_json):
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def chatbot_response(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents)
return res
#Creating GUI with tkinter
import tkinter
from tkinter import *
def send():
msg = EntryBox.get("1.0",'end-1c').strip()
EntryBox.delete("0.0",END)
if msg != '':
ChatLog.config(state=NORMAL)
ChatLog.insert(END, "You: " + msg + '\n\n')
ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
res = chatbot_response(msg)
ChatLog.insert(END, "Bot: " + res + '\n\n')
ChatLog.config(state=DISABLED)
ChatLog.yview(END)
base = Tk()
base.title("VISHNU Assistance-Bot")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)
#Create Chat window
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatLog.config(state=DISABLED)
#Bind scrollbar to Chat window
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set
#Create Button to send message
SendButton = Button(base, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
command= send )
#Create the box to enter message
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
# EntryBox.bind("<Return>", send)
#Place all components on the screen
scrollbar.place(x=376,y=6, height=386)
ChatLog.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
base.mainloop()
| [
"[email protected]"
] | |
5a238a1582affefe50b7405410ac9c64ff303309 | edcc26728370aa5bfabfbf5615933c34b108ed21 | /sketches/readLines/readLines.pyde | eb9caaf92a7754dd7c22440ad54ab00eda3f75ef | [
"MIT"
] | permissive | kantel/processingpy | 9e94f4116257e9cfcd59c1f71d7572559c703058 | 1eef60347d41563aef7a092ff35434bd47d931d2 | refs/heads/master | 2023-08-15T12:04:46.713124 | 2023-07-29T16:41:14 | 2023-07-29T16:41:14 | 73,006,897 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 599 | pyde | # Font-Test auf UTF-8; aus der Dokumentation:
# Starting with Processing release 0134,
# all files loaded and saved by the Processing API
# use UTF-8 encoding.
font = None
def setup():
size(500, 500)
# fontList = PFont.list()
# printArray(fontList)
font = createFont("Palatino-Roman", 32)
textFont(font)
noLoop()
def draw():
background(30)
textSize(32)
u = 50
text("Seltsame Zeichen", 20, u)
u = 80
textSize(24)
lines = loadStrings("boxer.txt")
for line in lines:
print(line)
text(line, 20, u, 460, 500)
u += 80
| [
"[email protected]"
] | |
808c4ec297fe02c4849e4ad2d8562f28cd8a4e8c | 55a9b1b294d5a402c63848f9f7386e3bf93645da | /docker/src/clawpack-5.3.1/visclaw/src/python/visclaw/data.py | 83a00cc67a98eecde9d7e4e3d64ae7a12ff2a277 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"MIT"
] | permissive | geohackweek/visualization | b606cfade5d31f59cc38602df05930aed6e19b17 | 5d29fa5b69d69ee5c18ffaef2d902bd51f5807c8 | refs/heads/gh-pages | 2021-01-21T13:34:44.622039 | 2019-09-06T23:28:08 | 2019-09-06T23:28:08 | 68,648,198 | 11 | 13 | NOASSERTION | 2019-09-06T23:28:09 | 2016-09-19T21:27:33 | Jupyter Notebook | UTF-8 | Python | false | false | 39,350 | py | """
Plotting Data Module
Contains the general class definition and the subclasses of the Clawpack
data objects specific to plotting.
"""
import os
import copy
import numpy as np
import re
import logging
import clawpack.clawutil.data as clawdata
import gaugetools
import time
import clawpack.pyclaw.controller
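# A minimal sketch of how these classes are typically wired together in a
# user's setplot function (the figure name, figno, plot_type and attribute
# values below are illustrative):
#
#     def setplot(plotdata):
#         plotdata.clearfigures()
#         plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
#         plotaxes = plotfigure.new_plotaxes()
#         plotaxes.title = 'q[0]'
#         plotaxes.xlimits = 'auto'
#         plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
#         plotitem.plot_var = 0
#         return plotdata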
# ============================================================================
# Subclass ClawPlotData containing data for plotting results
# ============================================================================
class ClawPlotData(clawdata.ClawData):
"""ClawPlotData class
Data subclass containing plot data.
"""
# ========== Initialization routine ======================================
def __init__(self, controller=None):
"""Initialize a PlotData object
"""
# Initialize the data object and read the data files
super(ClawPlotData,self).__init__()
# default values of attributes:
if controller:
controller.plotdata = self
# inherit some values from controller
self.add_attribute('rundir',copy.copy(controller.rundir))
self.add_attribute('outdir',copy.copy(controller.outdir))
if len(controller.frames)>0:
for i,frame in enumerate(controller.frames):
self.framesoln_dict[str(i)] = frame
self.add_attribute('format',copy.copy(controller.output_format))
else:
self.add_attribute('rundir',os.getcwd()) # uses *.data from rundir
self.add_attribute('outdir',os.getcwd()) # where to find fort.* files
self.add_attribute('format','ascii')
# This should eventually replace all need for recording the above
# information
self.add_attribute('output_controller', None)
self.output_controller = clawpack.pyclaw.controller.OutputController(
self.outdir, file_format=self.format)
self.add_attribute('plotdir',os.getcwd()) # directory for plots *.png, *.html
self.add_attribute('overwrite',True) # ok to overwrite old plotdir?
self.add_attribute('plotter','matplotlib') # backend for plots
self.add_attribute('msgfile','') # where to write error messages
self.add_attribute('verbose',True) # verbose output?
self.add_attribute('ion',False) # call ion() or ioff()?
self.add_attribute('printfigs',True)
self.add_attribute('print_format','png')
self.add_attribute('print_framenos','all') # which frames to plot
self.add_attribute('print_gaugenos','all') # which gauges to plot
self.add_attribute('print_fignos','all') # which figures to plot each frame
self.add_attribute('iplotclaw_fignos','all') # which figures to plot interactively
self.add_attribute('latex',True) # make latex files for figures
self.add_attribute('latex_fname','plots') # name of latex file
self.add_attribute('latex_title','Clawpack Results')
self.add_attribute('latex_framesperpage','all') # number of frames on each page
self.add_attribute('latex_framesperline',2) # number of frames on each line
self.add_attribute('latex_figsperline','all') # number of figures on each line
self.add_attribute('latex_makepdf',False) # run pdflatex on latex file
self.add_attribute('html',True) # make html files for figures
self.add_attribute('html_index_fname','_PlotIndex.html') # name of html index file
self.add_attribute('html_index_title','Plot Index') # title at top of index page
self.add_attribute('html_homelink',None) # link to here from top of _PlotIndex.html
self.add_attribute('html_movie','JSAnimation') # make html with java script for movie
self.add_attribute('html_eagle',False) # use EagleClaw titles on html pages?
self.add_attribute('kml',False) # make kml plots and a kml file for figures
self.add_attribute('kml_index_fname','_GoogleEarth') # name of html index file
self.add_attribute('kml_publish',None)
self.add_attribute('kml_name',"GeoClaw")
self.add_attribute('kml_starttime',None)
self.add_attribute('kml_tz_offset',None)
self.add_attribute('gif_movie',False) # make animated gif movie of frames
self.add_attribute('setplot',False) # Execute setplot.py in plot routine
self.add_attribute('mapc2p',None) # function to map computational
# points to physical
self.add_attribute('beforeframe',None) # function called before all plots
# in each frame are done
self.add_attribute('afterframe',None) # function called after all plots
# in each frame are done
self.add_attribute('plotfigure_dict',{})
try:
from collections import OrderedDict # new in Python 2.7
d = OrderedDict()
except:
d = {}
self.add_attribute('otherfigure_dict',d)
self.add_attribute('framesoln_dict',{}) # dictionary for holding framesoln
# objects associated with plots
self.add_attribute('gaugesoln_dict',{}) # dictionary for holding gaugesoln
# objects associated with plots
self.add_attribute('save_frames',True) # True ==> Keep a copy of any frame
# read in. False ==> Clear the frame
# solution dictionary before adding
# another solution
self.add_attribute('save_figures',True) # True ==> Keep a copy of and figure
# created. False ==> Clear the
# figure dictionary before adding
# another solution
self.add_attribute('refresh_gauges',False) # False ==> don't re-read gaugesoln if
# already in gaugesoln_dict
self.add_attribute('timeframes_framenos',None)
self.add_attribute('timeframes_frametimes',None)
self.add_attribute('timeframes_fignos',None)
self.add_attribute('timeframes_fignames',None)
self.add_attribute('gauges_gaugenos',None)
self.add_attribute('gauges_fignos',None)
self.add_attribute('gauges_fignames',None)
self._next_FIG = 1000
self._fignames = []
self._fignos = []
self._mode = 'unknown'
self._figname_from_num = {}
self._otherfignames = []
def new_plotfigure(self, name=None, figno=None, type='each_frame'):
"""
Create a new figure for Clawpack plots.
If type='each_frame' it is a figure that will be plotted
for each time frame.
If type='multi_frame' it is a figure that will be plotted based on
all the frames, such as x-t plots or time series. (Not yet implemented)
"""
if (self._mode != 'iplotclaw') and (name in self._fignames):
print '*** Warning, figure named %s has already been created' % name
if (self._mode != 'iplotclaw') and (figno in self._fignos):
print '*** Warning, figure number %s has already been created' % figno
if figno is None:
self._next_FIG += 1
figno = self._next_FIG
if name is None:
name = "FIG%s" % figno
if name in self._fignames:
print "*** Error in new_plotfigure: Figure name already used... ",name
raise Exception("Figure name already used")
elif figno in self._fignos:
print "*** Error in new_plotfigure: Figure number already used... ",figno
raise Exception("Figure number already used")
self._fignames.append(name)
self._fignos.append(figno)
plotfigure = ClawPlotFigure(name, figno, type, self)
if not self.save_figures:
self.plotfigure_dict.clear()
self.plotfigure_dict[name] = plotfigure
self._figname_from_num[figno] = name
return plotfigure
def getframe(self,frameno,outdir=None,refresh=False):
"""
ClawPlotData.getframe:
Return an object of class Solution containing the solution
for frame number frameno.
If refresh == True then this frame is read from the fort
files, otherwise it is read from the fort files only if the
        dictionary self.framesoln_dict has no key (frameno, outdir). If it does, the
frame has previously been read and the dictionary value is returned.
"""
from clawpack.pyclaw import solution
framesoln_dict = self.framesoln_dict
if 0:
if outdir:
key = (frameno, outdir)
else:
key = frameno
outdir = self.outdir
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
key = (frameno, outdir)
if refresh or (not framesoln_dict.has_key(key)):
framesoln = solution.Solution(frameno,path=outdir,file_format=self.format)
if not self.save_frames:
framesoln_dict.clear()
framesoln_dict[key] = framesoln
if key != frameno:
print ' Reading Frame %s at t = %g from outdir = %s' \
% (frameno,framesoln.t,outdir)
else:
print ' Reading Frame %s at t = %g ' \
% (frameno,framesoln.t)
else:
framesoln = self.framesoln_dict[key]
return framesoln
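    # Example (sketch): fetching a frame directly, e.g. inside an afterframe
    # function or an interactive session (frame number and outdir are
    # illustrative):
    #
    #     framesoln = plotdata.getframe(2, outdir='_output')
    #     print framesoln.t, len(framesoln.patches)
    #
    # Repeated calls with the same (frameno, outdir) reuse the cached copy in
    # framesoln_dict unless refresh=True is passed.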
def clearfigures(self):
"""
Clear all plot parameters specifying figures, axes, items.
Does not clear the frames of solution data already read in.
For that use clearframes.
"""
self.plotfigure_dict.clear()
self._fignames = []
self._fignos = []
self._next_FIG = 1000
self._otherfignames = []
def clearframes(self, framenos='all'):
"""
Clear one or more frames from self.framesoln_dict.
Need to add outdir option!
"""
if isinstance(framenos, int):
framenos = [framenos] # turn into a list
if framenos=='all':
self.framesoln_dict.clear()
print 'Cleared all frames'
else:
            for frameno in framenos:
                # framesoln_dict is keyed by (frameno, outdir) tuples, so
                # remove every entry whose frame number matches.
                keys = [k for k in self.framesoln_dict.keys() if k[0] == frameno]
                if len(keys) == 0:
                    print 'No frame data to clear for frame ',frameno
                else:
                    for key in keys:
                        self.framesoln_dict.pop(key)
                    print 'Cleared data for frame ',frameno
def getgauge(self, gaugeno, outdir=None, verbose=True):
"""
ClawPlotData.getgauge:
        Return an object of class gaugetools.GaugeSolution containing the solution
for gauge number gaugeno.
If self.refresh_gauges == True then this gauge is read from the
fort.gauge file, otherwise it is read only if the
        dictionary self.gaugesoln_dict has no key (gaugeno, outdir). If it does, the
gauge has previously been read and the dictionary value is returned.
"""
# Construct path to file
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
key = (gaugeno, outdir)
# Reread gauge data file
if self.refresh_gauges or (not self.gaugesoln_dict.has_key(key)):
# Attempt to fetch location and time data for checking
location = None
try:
try:
import clawpack.amrclaw.data as amrclaw
except ImportError as e:
print "You must have AMRClaw installed to plot gauges."
print "continuing..."
return None
gauge_data = amrclaw.GaugeData()
gauge_data.read(outdir)
# Check to make sure the gauge requested is in the data file
if gaugeno not in gauge_data.gauge_numbers:
                    raise Exception("Could not find gauge %s in gauges data file." % gaugeno)
# Extract locations from gauge data file to be used with the
# solutions below
locations = {}
for gauge in gauge_data.gauges:
locations[gauge[0]] = gauge[1:-2]
except:
if verbose:
print "*** WARNING *** Could not read gauges.data file from"
print " %s" % outdir
print "*** Unable to determine gauge locations"
# raise Warning()
# Read in all gauges
try:
file_path = os.path.join(outdir,'fort.gauge')
if not os.path.exists(file_path):
print '*** Warning: cannot find gauge data file %s'%file_path
pass
else:
if verbose:
print "Reading gauge data from %s" % file_path
raw_data = np.loadtxt(file_path)
gauge_read_string = ""
if len(raw_data) == 0:
print '*** Warning: fort.gauge is empty'
gauge_numbers = []
else:
# Convert type for equality comparison:
raw_numbers = np.array(raw_data[:,0], dtype=int)
gauge_numbers = list(set(raw_numbers))
gauge_numbers.sort()
if verbose:
print "In fort.gauge file, found gauge numbers %s" \
% gauge_numbers
for n in gauge_numbers:
try:
loc = locations[n]
except:
loc = None
gauge = gaugetools.GaugeSolution(gaugeno,
location=loc)
gauge_indices = np.nonzero(n == raw_numbers)[0]
gauge.level = [int(value) for value in raw_data[gauge_indices,1]]
gauge.t = raw_data[gauge_indices,2]
gauge.q = raw_data[gauge_indices,3:].transpose()
gauge.number = n
gauge_read_string = " ".join((gauge_read_string,str(n)))
self.gaugesoln_dict[(n, outdir)] = gauge
if verbose:
print "Read in gauges [%s]" % gauge_read_string[1:]
except Exception as e:
print '*** Error reading gauges in ClawPlotData.getgauge'
print '*** outdir = ', outdir
raise e
# Attempt to fetch gauge requested
try:
return self.gaugesoln_dict[key]
except Exception as e:
print '*** Unable to find gauge %d in solution dictionary'%gaugeno
print '*** Lookup key was %s'%str(key)
raise e
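    # Example (sketch): reading a gauge time series (the gauge number is
    # illustrative and must appear in the fort.gauge output):
    #
    #     gauge = plotdata.getgauge(1)
    #     times = gauge.t
    #     surface = gauge.q[0, :]   # first solution component at the gauge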
def plotframe(self, frameno):
from clawpack.visclaw import frametools
frametools.plotframe(frameno, self)
def printframes(self, verbose=True):
#from clawpack.visclaw import frametools
#frametools.printframes(self, verbose)
print "*** printframes is deprecated. Use plotpages.plotclaw_driver"
print "*** for added capabilities."
raise DeprecationWarning("The method 'printframes' is deprecated.")
def fignos(self):
"""
Return a list of the figure numbers actually used.
Useful in afterframe function for example to loop over all
figures and do something.
"""
return self._fignos
def mode(self):
"""
Return self._mode, which is set internally to
'iplotclaw' if Iplotclaw is in use,
'printframes' if printframes is being used
Useful in afterframe function if you want to do different things
for interactive or print modes.
"""
return self._mode
def iplotclaw(self):
"""
Return True if interactive plotting with iplotclaw is being done.
"""
return (self._mode == 'iplotclaw')
def getfigure(self,figname):
try:
plotfigure = self.plotfigure_dict[figname]
except:
raise Exception('Error accessing plotfigure_dict[%s]' % figname)
return plotfigure
def getaxes(self,axesname,figname=None):
found = True
if not figname:
found = False
for fig in self._fignames:
plotfigure = self.getfigure(fig)
if axesname in plotfigure._axesnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname'
print ' try getaxes(axesname, figname)'
return None
figname = fig
found = True
if not found:
print '*** No axes found with name = ',axesname
return None
try:
plotfigure = self.getfigure(figname)
plotaxes = plotfigure.plotaxes_dict[axesname]
except:
print '*** Error accessing plotaxes[%s]' % axesname
print '*** figname = %s' % figname
return None
return plotaxes
def getitem(self,itemname,axesname=None,figname=None):
found = True
if not figname:
# search over all figures looking for the item
found = False
for fign in self._fignames:
plotfigure = self.getfigure(fign)
if not axesname:
# search over all axes looking for the item
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname and/or axesname'
print ' try getitem(itemname, axesname, figname)'
return None
axesname = axesn
figname = fign
found = True
else:
# axesname was specified (but not figname)
plotaxes = self.getaxes(axesname,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname and/or axesname'
print ' try getitem(itemname, axesname, figname)'
return None
figname = fign
found = True
elif not axesname:
# figname was specified but not axesname.
# search over all axes looking for the item
found = False
plotfigure = self.getfigure(figname)
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,figname)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify axesname'
print ' try getitem(itemname, axesname, figname)'
return None
axesname = axesn
found = True
if not found:
print '*** No item found with name = ',itemname
return None
try:
plotaxes = self.getaxes(axesname,figname)
plotitem = plotaxes.plotitem_dict[itemname]
except:
print '*** Error accessing plotitem[%s]' % itemname
print '*** figname = ',figname
print '*** axesname = ',axesname
return None
return plotitem
def showitems(self):
fignames = self._fignames
print "\n\nCurrent plot figures, axes, and items:"
print "---------------------------------------"
for figname in fignames:
plotfigure = self.getfigure(figname)
s = " figname = %s, figno = %s" % (figname, plotfigure.figno)
if not plotfigure._show:
s = s + " [Not showing]"
print s
axesnames = plotfigure._axesnames
for axesname in axesnames:
plotaxes = self.getaxes(axesname,figname)
s = " axesname = %s, axescmd = %s" \
% (axesname, plotaxes.axescmd)
if not plotaxes._show:
s = s + " [Not showing]"
print s
for itemname in plotaxes._itemnames:
plotitem = self.getitem(itemname,axesname,figname)
plot_type = plotitem.plot_type
s = " itemname = %s, plot_type = %s" \
% (itemname,plot_type)
if not plotitem._show:
s = s + " [Not showing]"
print s
print " "
def getq(self,frameno):
solution = self.getframe(frameno)
patches = solution.patches
if len(patches) > 1:
print '*** Warning: more than 1 patch, q on patch[0] is returned'
q = patches[0].q
return q
def new_otherfigure(self, name=None, fname=None):
"""
Create a new figure for Clawpack plots.
For figures not repeated each frame.
"""
if (self._mode != 'iplotclaw') and (name in self._fignames):
print '*** Warning, figure named %s has already been created' % name
if name is None:
if fname is None:
raise Exception("Need to provide name in new_otherfigure")
else:
name = fname
if name in self._otherfignames:
print "*** Error in new_otherfigure: Figure name already used... ",name
raise Exception("Figure name already used")
self._otherfignames.append(name)
otherfigure = ClawOtherFigure(name,self)
self.otherfigure_dict[name] = otherfigure
otherfigure.fname = fname
return otherfigure
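    # Example (sketch): registering a page in the plot index for a figure made
    # outside the frame loop (name and file name are illustrative):
    #
    #     otherfig = plotdata.new_otherfigure(name='gauge comparison',
    #                                         fname='gauge_comparison.png')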
def set_outdirs(self):
"""
Make a list of all outdir's for all plotitem's in the order they
are first used.
"""
outdir_list = []
for figname in self._fignames:
plotfigure = self.plotfigure_dict[figname]
if not plotfigure._show:
continue # skip to next figure
for axesname in plotfigure._axesnames:
plotaxes = plotfigure.plotaxes_dict[axesname]
if not plotaxes._show:
continue # skip to next axes
for itemname in plotaxes._itemnames:
plotitem = plotaxes.plotitem_dict[itemname]
if not plotitem._show:
continue # skip to next item
if plotitem.outdir is not None:
outdir = plotitem.outdir
else:
outdir = self.outdir
if outdir not in outdir_list:
outdir_list.append(outdir)
self._outdirs = outdir_list
return self
# ============================================================================
# Subclass ClawPlotFigure containing data for plotting a figure
# ============================================================================
class ClawPlotFigure(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single figure.
This may consist of several ClawPlotAxes objects.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, figno, fig_type, plotdata):
"""
Initialize a ClawPlotFigure object
"""
super(ClawPlotFigure, self).__init__()
self._plotdata = plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('figno',figno)
self.add_attribute('kwargs',{})
self.add_attribute('clf_each_frame',True)
self.add_attribute('clf_each_gauge',True)
self._axesnames = []
self.add_attribute('show',True)
self._show = True
self.add_attribute('plotaxes_dict', {})
self.add_attribute('type',fig_type) # = 'each_frame' or 'each_run' or 'each_gauge'
self.add_attribute('use_for_kml',False)
self.add_attribute('kml_dpi',200)
self.add_attribute('kml_xlimits',None)
self.add_attribute('kml_ylimits',None)
self.add_attribute('kml_tile_images',False)
self.add_attribute('kml_colorbar',None)
self.add_attribute('kml_use_for_initial_view',False)
self.add_attribute('kml_figsize',None) # Figure size; specify to get rid of aliasing
self._next_AXES = 0
def new_plotaxes(self, name=None, type='each_frame'):
"""
Create a new axes that will be plotted in this figure.
If type='each_frame' it is an axes that will be plotted
for each time frame.
If type='multi_frame' it is an axes that will be plotted based on
all the frames, such as x-t plots or time series. (Not yet implemented)
If type='empty' it is created without doing any plots using the
pyclaw tools. Presumably the user will create a plot within an
afteraxes command, for example.
"""
if name is None:
self._next_AXES += 1
name = "AXES%s" % self._next_AXES
if name in self._axesnames:
print '*** Warning, axes named %s has already been created' % name
if name not in self._axesnames:
self._axesnames.append(name)
plotaxes = ClawPlotAxes(name, self)
self.plotaxes_dict[name] = plotaxes
plotaxes.type = type
return plotaxes
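    # Example (sketch): adding and configuring an axes on an existing figure
    # (the subplot command, limits and callback are illustrative):
    #
    #     plotaxes = plotfigure.new_plotaxes('main')
    #     plotaxes.axescmd = 'subplot(2,1,1)'
    #     plotaxes.xlimits = [0., 1.]
    #     plotaxes.afteraxes = my_annotation_function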
def gethandle(self):
_handle = getattr(self,'_handle',None)
return _handle
# ============================================================================
# Subclass ClawPlotAxes containing data for plotting axes within a figure
# ============================================================================
class ClawPlotAxes(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single axes.
This may consist of several ClawPlotItem objects.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plotfigure):
"""
Initialize a ClawPlotAxes object
"""
super(ClawPlotAxes, self).__init__()
self._plotfigure = plotfigure # figure this item is on
self._plotdata = plotfigure._plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('title',name)
self.add_attribute('title_with_t',True) # creates title of form 'title at time t = ...'
self.add_attribute('axescmd','subplot(1,1,1)')
self.add_attribute('afteraxes',None)
self.add_attribute('xlimits',None)
self.add_attribute('ylimits',None)
self.add_attribute('scaled',False) # true so x- and y-axis scaled same
self.add_attribute('image',False) # true so x- and y-axis scaled same
# and plot bounds tight
self.add_attribute('plotitem_dict', {})
self.add_attribute('type','each_frame')
self._itemnames = []
self.add_attribute('show',True)
self._show = True
self._handle = None
self._next_ITEM = 0
self.add_attribute('figno', self._plotfigure.figno)
def new_plotitem(self, name=None, plot_type=None):
# Create a new entry in self.plotitem_dict
if name is None:
self._next_ITEM += 1
name = "ITEM%s" % self._next_ITEM
if name not in self._itemnames:
self._itemnames.append(name)
plotitem = ClawPlotItem(name, plot_type, plotaxes=self)
self.plotitem_dict[name] = plotitem
return plotitem
def get_plotdata(self):
plotdata = getattr(self,'_plotdata',None)
return self._plotdata
def get_plotfigure(self):
plotfigure = getattr(self,'_plotfigure',None)
return self._plotfigure
def gethandle(self):
_handle = getattr(self,'_handle',None)
return self._handle
# ============================================================================
# Subclass ClawPlotItem containing data for plotting a single object
# ============================================================================
class ClawPlotItem(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single object.
This may be a single curve, set of points, contour plot, etc.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plot_type, plotaxes):
"""
Initialize a ClawPlotItem object
"""
super(ClawPlotItem, self).__init__()
self._plotaxes = plotaxes # axes this item is on
self._plotfigure = plotaxes._plotfigure # figure this item is on
self._plotdata = plotaxes._plotfigure._plotdata # parent ClawPlotData object
try:
num_dim = int(plot_type[0]) # first character of plot_type should be num_dim
except:
print '*** Error: could not determine num_dim from plot_type = ',plot_type
self.add_attribute('num_dim',num_dim)
self.add_attribute('name',name)
self.add_attribute('figno',plotaxes.figno)
self.add_attribute('outdir',None) # indicates data comes from
# self._plotdata.outdir
self.add_attribute('plot_type',plot_type)
self.add_attribute('plot_var',0)
self.add_attribute('plot_show',True)
self.add_attribute('MappedGrid',None) # False to plot on comput. patch even
# if _plotdata.mapc2p is not None.
self.add_attribute('mapc2p',None) # function to map computational
# points to physical (over-rides
# plotdata.mapc2p if set for item
self.add_attribute('afterpatch',None) # function called after each patch is
# plotted within each single plotitem.
self.add_attribute('afteritem',None) # function called after the item is
# plotted for each frame
self.add_attribute("show",True) # False => suppress showing this item
self._show = True # Internal
self._current_pobj = None
self.add_attribute('params',{}) # dictionary to hold optional parameters
if num_dim == 1:
self.add_attribute('plotstyle','-')
self.add_attribute('color',None)
self.add_attribute('kwargs',{})
amr_attributes = """show color kwargs data_show""".split()
for a in amr_attributes:
self.add_attribute('amr_%s' % a, [])
if plot_type == '1d_fill_between':
zero_function = lambda current_data: 0.
self.add_attribute('plot_var2',zero_function)
self.add_attribute('fill_where',None)
if plot_type == '1d_from_2d_data':
self.add_attribute('map_2d_to_1d',None)
self.add_attribute('amr_plotstyle',[])
elif num_dim == 2:
# default values specifying this single plot:
self.add_attribute('plot_type',plot_type)
self.add_attribute('celledges_show',0)
self.add_attribute('celledges_color','k')
self.add_attribute('patch_bgcolor','w')
self.add_attribute('patchedges_show',0)
self.add_attribute('patchedges_color','k')
self.add_attribute('add_colorbar',False)
self.add_attribute('colorbar_shrink',1.0)
self.add_attribute('colorbar_label',None)
self.add_attribute('colorbar_ticks', None)
self.add_attribute('colorbar_tick_labels',None)
self.add_attribute('kwargs',{})
amr_attributes = """celledges_show celledges_color data_show
patch_bgcolor patchedges_show patchedges_color kwargs""".split()
for a in amr_attributes:
self.add_attribute('amr_%s' % a, [])
if plot_type == '2d_pcolor':
from clawpack.visclaw import colormaps
self.add_attribute('pcolor_cmap',colormaps.yellow_red_blue)
self.add_attribute('pcolor_cmin',None)
self.add_attribute('pcolor_cmax',None)
elif plot_type == '2d_imshow':
from clawpack.visclaw import colormaps
self.add_attribute('imshow_cmap',colormaps.yellow_red_blue)
self.add_attribute('imshow_cmin',None)
self.add_attribute('imshow_cmax',None)
elif plot_type in ['2d_contour', '2d_contourf']:
self.add_attribute('contour_nlevels',20)
self.add_attribute('contour_levels',None)
self.add_attribute('contour_min',None)
self.add_attribute('contour_max',None)
self.add_attribute('contour_show',1)
self.add_attribute('contour_colors','k')
self.add_attribute('contour_cmap',None)
amr_attributes = """show colors cmap data_show""".split()
for a in amr_attributes:
self.add_attribute('amr_contour_%s' % a, [])
if plot_type == '2d_contourf':
self.add_attribute('fill_cmap',None)
self.add_attribute('fill_cmin',None)
self.add_attribute('fill_cmax',None)
self.add_attribute('fill_colors',None)
# Note either fill_cmap or fill_colors must be None
elif plot_type == '2d_schlieren':
from clawpack.visclaw import colormaps
self.add_attribute('schlieren_cmap',colormaps.schlieren_grays)
self.add_attribute('schlieren_cmin',None)
self.add_attribute('schlieren_cmax',None)
elif plot_type == '2d_patch':
self.add_attribute('max_density',None)
self.celledges_show = True
self.patchedges_show = True
elif plot_type == '2d_quiver':
self.add_attribute('quiver_var_x',None)
self.add_attribute('quiver_var_y',None)
self.add_attribute('quiver_coarsening',1)
self.add_attribute('quiver_key_show',False)
self.add_attribute('quiver_key_label_x',0.15)
self.add_attribute('quiver_key_label_y',0.95)
self.add_attribute('quiver_key_units','')
self.add_attribute('quiver_key_scale',None)
self.add_attribute('quiver_key_kwargs',{})
amr_attributes = """coarsening key_show key_label_x key_label_y
key_scale key_kwargs data_show""".split()
for a in amr_attributes:
self.add_attribute('amr_quiver_%s' % a, [])
else:
print '*** Warning 2d plot type %s not recognized' % plot_type
elif num_dim == 3:
raise NotImplementedError('ClawPlotItem not yet set up for num_dim = 3')
else:
raise Warning('Unrecognized plot_type in ClawPlotItem')
def getframe(self,frameno,refresh=False):
"""
ClawPlotItem.getframe:
Return an object of class Solution containing the solution
for frame number frameno.
If refresh == True then this frame is read from the fort
        files, otherwise it is read from the fort files only if the
        dictionary self.framesoln_dict has no key frameno. If it does, the
frame has previously been read and the dictionary value is returned.
"""
plotdata = self._plotdata
outdir = self.outdir
framesoln = plotdata.getframe(frameno, outdir,refresh=refresh)
return framesoln
def getgauge(self,gauge):
"""
ClawPlotItem.getgauge:
Return an object of class GaugeSolution containing the solution
for gauge number gaugeno.
If self.refresh_gauges == True then this gauge is read from the
        fort.gauge file, otherwise it is read only if the
        dictionary self.gaugesoln_dict has no key gaugeno. If it does, the
gauge has previously been read and the dictionary value is returned.
"""
plotdata = self._plotdata
outdir = self.outdir
gaugesoln = plotdata.getgauge(gauge, outdir)
return gaugesoln
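    # Hedged example of how these accessors are commonly called, e.g. from an
    # afteraxes/afteritem callback (the frame and gauge numbers are arbitrary):
    #
    #     framesoln = plotitem.getframe(10)    # Solution for frame 10
    #     gaugesoln = plotitem.getgauge(104)   # GaugeSolution for gauge 104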
# ============================================================================
# Subclass ClawOtherFigure containing data for plotting a figure
# ============================================================================
class ClawOtherFigure(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single figure.
For figures that are not produced each frame.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plotdata):
"""
Initialize a ClawOtherFigure object
"""
super(ClawOtherFigure, self).__init__()
self._plotdata = plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('fname',None) # name of png file
self.add_attribute('makefig',None) # function invoked to create figure
| [
"[email protected]"
] | |
a0bb2dd4a72b7f004884bdda564b3762452634f9 | c2d681e9a4c7b1be07e9d581ad3ac00a5c783604 | /classes.py | f9cca27f147dc0814ed33409926ddb4d9e4d635e | [] | no_license | DennisMufasa/PythoneCode | 6291ddf2d08a8361fe82e81bc5747eb0123848f6 | d9b74205de0c60fec2c088e1b6c2b0b7a91c1273 | refs/heads/master | 2021-04-05T23:45:50.256668 | 2019-04-07T16:17:20 | 2019-04-07T16:17:20 | 124,888,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | class Mufasa:
def __init__(self,name,age,profession,health):
self.name=name
self.age=age
self.profession=profession
self.health=health
def career(self):
if self.profession=='Developer':
print(self.name,' is a developer.')
else:
print(self.name,' is not qualified for this job.')
# wewe=Mufasa('Vicky',23,'IT','Insured')
# wewe.career()
# mimi=Mufasa('Dennis',23,'Developer','Insured')
# mimi.career()
# mimi=Mufasa('Dennis',23,'developer','insured')
# mimi.details('Dennis',23,'developer')
# mimi.job('Anthony','doctor')
# mimi.miaka('Vicky',23)
# mimi.afya('Walter','not insured') | [
"[email protected]"
] | |
c936f1c418ea6c456cf0dd6c2b5cec291e39acf2 | 905f40a4ad8e17bb4871cf87b6ee184a76a77c2a | /BCM/management/commands/remove_templates.py | bd73841db58291945cf21bda90d4758966cf6519 | [] | no_license | sai9912/mypyton | 5e1f7ca278051d5f588af1d9accae5fd1780020b | 338fd6396dbdce971bc542718fbb9608bdcfc2a7 | refs/heads/master | 2022-12-16T05:04:34.590818 | 2019-04-18T09:18:06 | 2019-04-18T09:18:06 | 176,324,427 | 0 | 0 | null | 2022-12-08T02:31:10 | 2019-03-18T16:16:56 | Python | UTF-8 | Python | false | false | 1,975 | py | from django.core.management import BaseCommand
from member_organisations.models import MemberOrganisation, ProductTemplate, ProductAttribute, ProductPackaging
IMPORT_DEBUG = True
class Command(BaseCommand):
"""
python manage.py remove_templates
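    python manage.py remove_templates <mo_slug>   (optional: limit the cleanup to one member organisation)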
"""
def add_arguments(self, parser):
parser.add_argument('mo_slug', nargs='?', default='', type=str)
def handle(self, *args, **options):
mo_slug = options.get('mo_slug')
try:
mo = MemberOrganisation.objects.get(slug=mo_slug)
except MemberOrganisation.DoesNotExist:
pass
else:
product_templates = ProductTemplate.objects.filter(member_organisation_id=mo.pk)
product_attributes = set()
for template in product_templates:
product_attributes |= set(template.attributes.all().values_list('pk', flat=True))
# delete attributes
ProductAttribute.objects.filter(id__in=product_attributes).delete()
# delete templates
product_templates_count = product_templates.count()
product_templates.delete()
if IMPORT_DEBUG and product_templates_count:
print('{attr} ProductAttribute and {c} ProductTemplate related to {mo} are removed'
.format(attr=len(product_attributes), c=product_templates_count, mo=mo_slug))
# delete orphaned attributes
product_attributes = ProductAttribute.objects.filter(member_organisation_id=mo.pk)
product_attributes_count = product_attributes.count()
product_attributes.delete()
if IMPORT_DEBUG and product_attributes_count:
print('{attr} orphaned ProductAttribute related to {mo} are removed'
.format(attr=product_attributes_count, mo=mo_slug))
# delete prod packaging too
ProductPackaging.objects.filter(member_organisation=mo.pk).delete()
| [
"[email protected]"
] | |
79ecffd0003b8f52b8e02699f96264b491844e07 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Palindromes/solution.py | 2287b35b7d8a759993933e56e1c6c55558875c88 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 418 | py | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
s = input().strip()
temp = s
if len(set(s)) > 1:
while s == temp[::-1]:
temp = temp[:-1]
print(len(temp))
else:
print(0)
| [
"[email protected]"
] | |
a04ef908392a402d41fe90499306a7c8d326b53a | 9307d42ca27c8f07115197851a4d2355a7492abc | /shared/views.py | ef3b27912011c7aa2b20c022011f458b65b3a524 | [] | no_license | dkutelov/djangoevents | 2a1c05d187a32557d4e5195cbe349efb05611ce4 | 9b6c4c9db366d7e282542cb853123dcca6191f8e | refs/heads/master | 2023-01-29T15:37:47.664373 | 2020-12-13T13:17:58 | 2020-12-13T13:17:58 | 314,523,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from django.core.exceptions import PermissionDenied
class GroupRequiredMixin:
groups = None
def dispatch(self, request, *args, **kwargs):
user = request.user
if not user.is_authenticated:
raise PermissionDenied
groups_set = set(self.groups or [])
raw_groups = user.groups.all()
user_groups = set([group.name for group in raw_groups])
if not user_groups.intersection(groups_set) and \
not user.is_superuser:
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
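# A minimal usage sketch (the view, model and group names below are made up):
#
#     from django.views.generic import ListView
#
#     class ModeratorDashboardView(GroupRequiredMixin, ListView):
#         groups = ['Moderators']
#         model = Event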
| [
"[email protected]"
] | |
e9cdc4ae77fecdbb0421ffa2a8da67c5833881db | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/fb_post_learning/fb_post/utils/get_reaction_metrics.py | 6e0d0e0bfcaaef57360b9dcf368f1f57eab9341b | [] | no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | {"filter":false,"title":"get_reaction_metrics.py","tooltip":"/fb_post_learning/fb_post/utils/get_reaction_metrics.py","undoManager":{"mark":1,"position":1,"stack":[[{"start":{"row":6,"column":1},"end":{"row":6,"column":2},"action":"insert","lines":["u"],"id":12}],[{"start":{"row":6,"column":1},"end":{"row":6,"column":2},"action":"remove","lines":["u"],"id":13}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":1},"end":{"row":6,"column":1},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1588937314048,"hash":"bf28be9d7caaea42ba7730b68b047b9fb53d68e8"} | [
"[email protected]"
] | |
e772c8886cb376be4c127bf28013a9fc58b59e69 | 2eff2b24d5b6f5dffc42c9cbde6102ec9317502f | /src/Bits.py | 49ff824b30bb9cf591b4e5c06713a9fefbb9b567 | [] | no_license | JakobKallestad/Python-Kattis | 599a14e71a8d5c52aae779b8db3d35f0e4d01e88 | 51656964e79cc861e53f574785aacb213ef10b46 | refs/heads/master | 2022-10-24T23:12:45.599813 | 2021-12-08T12:31:54 | 2021-12-08T12:31:54 | 156,881,692 | 2 | 1 | null | 2022-10-02T12:36:57 | 2018-11-09T15:34:09 | Python | UTF-8 | Python | false | false | 231 | py | n_test_cases = int(input())
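# For each test case: look at every decimal prefix of the line, convert it to an
# integer, and report the maximum number of set bits over all those prefixes.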
for _ in range(n_test_cases):
line = input()
max_bits = 0
for i in range(1, len(line)+1):
num = bin(int(line[:i]))
max_bits = max(max_bits, num.count('1'))
print(max_bits) | [
"[email protected]"
] | |
90a2df92f2f7c3fbf6c29a1d42bb305edf250d74 | a32c2ee4e6b2b1c6f8db02320c4bd50b17940af5 | /modules/AlipayDepostIII/AlipayDepostIII.py | 361f92c906504347a3178c30def95f38e7ca6b45 | [] | no_license | wszg5/studyGit | 93d670884d4cba7445c4df3a5def8085e5bf9ac0 | bebfc90bc38689990c2ddf52e5a2f7a02649ea00 | refs/heads/master | 2020-04-05T02:55:17.367722 | 2018-11-07T06:01:03 | 2018-11-07T06:01:03 | 156,494,390 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 26,351 | py | # coding:utf-8
import logging
from PIL.ImageShow import show
from uiautomator import Device
from Repo import *
import os, datetime, string, random
from zservice import ZDevice
import sys
from PIL import Image
import colorsys
from Inventory import *
class AlipayDepostIII:
def __init__(self):
self.repo = Repo()
def GetUnique(self):
nowTime = datetime.datetime.now().strftime("%Y%m%d%H%M%S"); # 生成当前时间
randomNum = random.randint(0, 1000); # 生成的随机整数n,其中0<=n<=100
if randomNum <= 10:
randomNum = str(00) + str(randomNum);
uniqueNum = str(nowTime) + str(randomNum);
return uniqueNum
def Gender(self,d,obj):
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, "tmp"))
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
sourcePng = os.path.join(base_dir, "%s_s.png" % (self.GetUnique()))
        if obj.exists:
            obj = obj.info
            obj = obj['bounds']  # bounds of the verification-code (captcha) widget
            left = obj["left"]  # position info of the captcha
            top = obj['top']
            right = obj['right']
            bottom = obj['bottom']
            d.screenshot(sourcePng)  # screenshot the whole screen while the captcha is displayed
            img = Image.open(sourcePng)
            box = (left, top, right, bottom)  # left top right bottom
            region = img.crop(box)  # crop out the captcha image
            # show(region)  # preview the info on the profile card
            image = region.convert('RGBA')
            # generate a thumbnail to reduce the computation and CPU load
            image.thumbnail((200, 200))
            max_score = None
            dominant_color = None
            for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):
                # skip pure black
                if a == 0:
                    continue
                saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]
                y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)
                y = (y - 16.0) / (235 - 16)
                # ignore highlight colors
                if y > 0.9:
                    continue
                score = (saturation + 0.1) * count
                if score > max_score:
                    max_score = score
                    dominant_color = (r, g, b)  # red, green, blue
        # print("---------------------------------------------------------------------------")
        # print(dominant_color)
        return dominant_color
def CheckLogined(self,d, z ):
z.toast( "检测是否已有支付宝帐号登录" )
z.cmd("shell", "am force-stop com.eg.android.AlipayGphone") # 强制停止
z.cmd("shell", "am start -n com.eg.android.AlipayGphone/com.eg.android.AlipayGphone.AlipayLogin")
z.sleep(15)
if d(textContains='口碑').exists:
if d( description='关闭', className='android.widget.ImageView' ).exists:
d( description='关闭', className='android.widget.ImageView' ).click( )
return True
return False
def CheckAddressBook(self,d, z ):
z.toast( "检测通讯录是否正常" )
d( description='通讯录' ).click( )
if d( text='转到银行卡' ).exists:
d( description='返回' ).click( )
d( description='通讯录' ).click( )
d( text='新的朋友' ).click( )
d( text='添加手机联系人' ).click( )
z.sleep(8)
if d(textContains='账号违规').exists or d(textContains='该功能暂未对您开放').exists:
d.server.adb.cmd( "shell", "pm clear com.eg.android.AlipayGphone" ).communicate( ) # 清除缓存
return False
return True
def impContact(self, d, z, args):
z.heartbeat()
if self.CheckLogined(d, z) :
if self.CheckAddressBook( d, z ):
z.toast("检测到已经登录,开始导入")
else:
z.toast( "通讯录异常,结束模块" )
return
else:
z.toast( "没有检测到登陆帐号,结束运行" )
return
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),os.path.pardir, "tmp"))
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
filename = os.path.join(base_dir, "%s.txt"%(self.GetUnique()) )
number_count = int(args['number_count'])
cate_id = args["repo_cate_id"]
intervalTime = int(args["intervalTime"])
while True:
exist_numbers = self.repo.GetNumber(cate_id, intervalTime, number_count, 'exist', 'NO')
print(exist_numbers)
remain = number_count - len(exist_numbers)
unknown_numbers = self.repo.GetNumber(cate_id, intervalTime, remain, 'unknown', 'NO')
numbers = exist_numbers + unknown_numbers
if len(numbers)> 0:
break
else:
normal_numbers = self.repo.GetNumber( cate_id, intervalTime, number_count, 'normal', 'NO')
if len( normal_numbers ) == 0:
d.server.adb.cmd( "shell",
"am broadcast -a com.zunyun.zime.toast --es msg \"电话号码%s号仓库为空,等待中\"" % cate_id ).communicate( )
else:
numbers = normal_numbers
break
d.server.adb.cmd("shell", "am broadcast -a com.zunyun.zime.toast --es msg \"电话号码%s号仓库为空,等待中\""%cate_id).communicate()
z.sleep(10)
z.heartbeat()
if numbers:
file_object = open(filename, 'w')
lines = ""
pname = ""
for number in numbers:
if number["name"] is None:
random_name = args['random_name']
if random_name == '是':
pname = z.phoneToName( number["number"] )
number["name"] = pname
else:
pname = number["number"]
else:
pname = number["name"]
lines = "%s%s----%s\r" %(lines, pname, number["number"])
file_object.writelines(lines)
file_object.close()
isclear = args['clear']
if isclear=='是':
d.server.adb.cmd("shell", "pm clear com.android.providers.contacts").communicate()
#d.server.adb.cmd("shell", "am", "start", "-a", "zime.clear.contacts").communicate()
d.server.adb.cmd("push", filename, "/data/local/tmp/contacts.txt").communicate()
d.server.adb.cmd("shell", "am", "start", "-n", "com.zunyun.zime/.ImportActivity", "-t", "text/plain", "-d",
"file:////data/local/tmp/contacts.txt").communicate()
#d.server.adb.cmd("shell", "am broadcast -a com.zunyun.import.contact --es file \"file:///data/local/tmp/contacts.txt\"").communicate()
os.remove(filename)
out = d.server.adb.cmd("shell",
"dumpsys activity top | grep ACTIVITY").communicate()[0].decode('utf-8')
while out.find("com.zunyun.zime/.ImportActivity") > -1:
z.heartbeat()
out = d.server.adb.cmd("shell",
"dumpsys activity top | grep ACTIVITY").communicate()[0].decode('utf-8')
z.sleep(5)
return numbers
def action(self, d, z, args):
numbers = self.impContact(d, z, args)
z.heartbeat()
str = d.info # 获取屏幕大小等信息
height = str["displayHeight"]
width = str["displayWidth"]
d.server.adb.cmd("shell", "am force-stop com.eg.android.AlipayGphone").wait() # 强制停止
d.server.adb.cmd("shell", "am start -n com.eg.android.AlipayGphone/com.eg.android.AlipayGphone.AlipayLogin").communicate() # 拉起来
z.sleep(10)
if d( description='关闭', className='android.widget.ImageView' ).exists:
d( description='关闭', className='android.widget.ImageView' ).click( )
accountStatus = "正常"
d(description='通讯录').click()
if d(text='转到银行卡').exists:
d(description ='返回').click()
d(description='通讯录').click()
d(text='新的朋友').click()
d(text='添加手机联系人').click()
z.sleep( int( args["contact_wait"] ) )
        publicpath = d(className='android.widget.ListView').child(className='android.widget.LinearLayout', index=2) \
            .child(className='android.widget.LinearLayout', index=0).child(
            className='android.widget.LinearLayout', index=0)  # prepared for the clicks below
times = 3
while d(textContains='没有联系人').exists:
z.heartbeat()
d(description='返回').click()
z.sleep(int(args["contact_wait"]))
d(text='添加手机联系人').click()
times = times - 1
if times < 0:
z.toast("手机通讯录里没有联系人")
return
z.heartbeat()
i = 0
set1 = set()
change = 0
while True:
judexist = d(className='android.widget.ListView').child(className='android.widget.LinearLayout',index=i)\
.child(className='android.widget.LinearLayout').child(className='android.widget.TextView',index=0)
if judexist.exists:
z.heartbeat()
change = 1
                phoneNumber = judexist.info['text']  # the phone number to be saved
if phoneNumber in set1:
i = i+1
continue
set1.add(phoneNumber)
# print(phoneNumber)
                judexist.click()  # tap the i-th contact in the list
z.sleep(1.5)
accountCount = "一对一"
if d(textContains='该手机号对应多个支付宝账户,请核实后选择').exists:
d(resourceId='com.alipay.mobile.contactsapp:id/head_icon').click()
accountCount = '一对多'
try:
path = d( className='android.widget.ListView' ).child( className='android.widget.FrameLayout',
index=0 ).child(
className='android.widget.ImageView', index=2 )
getinfo = self.Gender( d, path )
if getinfo == None:
rank = '非会员'
# print('不是会员')
elif getinfo[2] > 200: # (68, 164, 238)蓝色大众会员 (140, 142, 185)黑色砖石会员 (255, 197, 30)黄金会员
rank = '大众会员'
elif getinfo[0] > 200:
rank = '黄金会员'
elif getinfo[0] == 140 and getinfo[1] == 142 and getinfo[2] == 185:
rank = '白金会员'
else:
rank = '钻石会员'
# print('=====================================%s==================================================='%rank)
if d( className='android.widget.ListView' ).child( className='android.widget.FrameLayout' ).child(
className='android.widget.TextView', index=0 ).exists:
nickname = \
d( className='android.widget.ListView' ).child( className='android.widget.FrameLayout' ).child(
className='android.widget.TextView', index=0 ).info['text'] # 要保存的昵称
else:
nickname = '空'
# print('=============================%s=============================================================='%nickname)
z.heartbeat( )
if d( text='支付宝账户' ).exists:
for t in range( 0, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='支付宝账户' ).exists:
break
account = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
account = '空'
z.heartbeat( )
# print('================================%s============================================================='%account)
if d( text='真实姓名' ).exists:
path = publicpath.child( className='android.widget.LinearLayout', index=1 ).child(
className='android.widget.TextView', index='1' )
getinfo = self.Gender( d, path )
if getinfo[0] > 200:
gender = '女' # 要保存的性别和是否认证
identity = '已实名'
elif getinfo[2] > 200:
gender = '男'
identity = '已实名'
else:
gender = '无'
identity = '未实名'
# print('==========================%s==============%s======================================================'%(gender,identity))
if identity == '已实名':
d( text='转账' ).click( )
if d( textContains='对方账户存在异常' ).exists:
accountStatus = "异常"
d( text='确定' ).click( )
else:
realnameStr = \
d( className='android.widget.ScrollView' ).child(
className='android.widget.LinearLayout',
resourceId='com.alipay.mobile.transferapp:id/tf_receive_area' ).child(
className='android.widget.TextView' ).info['text']
a = realnameStr.find( '(' )
b = realnameStr.find( ')' )
if a != -1 and b != -1:
realname = realnameStr[a + 2:b]
else:
realname = realnameStr
# realname = d(className='android.widget.ScrollView').child(className='android.widget.LinearLayout',index=0).child(className='android.widget.RelativeLayout',index=1).child(className='android.widget.TextView').info['text']
if d( textContains='对方长时间未使用支付宝' ).exists:
accountStatus = "非常用"
else:
accountStatus = "常用"
d( description='返回' ).click( )
else:
realname = '无'
# print('=========================%s====================================================================='%realname)
if d( text='显示更多' ).exists:
d( text='显示更多' ).click( )
z.sleep( 1 )
if not d( text='收起' ).exists:
d.swipe( width / 2, height * 3 / 4, width / 2, height / 3 )
z.heartbeat( )
if d( text='地区' ).exists:
area = publicpath.child( className='android.widget.LinearLayout', index=2 ).child(
className='android.widget.TextView', index='1' ).info['text']
else:
area = '空'
# print('=========================%s====================================================================='%area)
if d( text='星座' ).exists: # 星座
zodiac = d( textContains='座', index=1 ).info['text']
else:
zodiac = '空'
# print('=============================%s================================================================='%zodiac)
if d( text='年龄' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='年龄' ).exists:
break
age = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
age = '空'
# print('=================================%s============================================================='%age)
if d( text='身高' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='身高' ).exists:
break
tall = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
tall = '空'
# print('==========================%s===================================================================='%tall)
if d( text='体重' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='体重' ).exists:
break
weight = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
weight = '空'
# print('=============================%s================================================================='%weight)
z.heartbeat( )
if d( text='职业' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='职业' ).exists:
break
carrer = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
carrer = '空'
# print('=============================%s================================================================='%carrer)
z.heartbeat( )
if d( text='收入' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='收入' ).exists:
break
income = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.widget.TextView',
index=1 ).info['text']
else:
income = '空'
# print('===============================%s==============================================================='%income)
if d( text='兴趣爱好' ).exists:
for t in range( 1, 14 ):
if publicpath.child( className='android.widget.LinearLayout', index=t ).child(
text='兴趣爱好' ).exists:
break
idexx = 0
taste = [] # 将所有兴趣保存到集合里
z.heartbeat( )
while True:
interest = publicpath.child( className='android.widget.LinearLayout', index=t ).child(
className='android.view.View' ).child( className='android.widget.TextView',
index=idexx )
if interest.exists:
hobby = interest.info['text']
taste.append( hobby )
idexx = idexx + 1
else:
break
else:
taste = []
# print(taste)
z.heartbeat( )
para = {"phoneNumber": phoneNumber,
"x_11": nickname,
"x_12": realname, "x_13": gender,
"x_14": area, "x_15": age,
"x_16": accountStatus,
"x_17": accountCount,
"x_01": "AliPay", "x_02": rank,
"x_03": account, "x_04": zodiac,
"x_05": identity, "x_06": tall,
"x_07": weight, "x_08": carrer,
"x_09": income, "x_10": taste}
self.repo.PostInformation( args["repo_information_id"], para )
for number in numbers:
if number["name"] == phoneNumber:
self.repo.uploadNumberALiPay( number["number"], args['repo_cate_id'], "checked" )
break
z.toast( "%s入库完成" % phoneNumber )
i = i + 1
d( description='返回' ).click( )
except:
logging.exception("exception")
for number in numbers:
if number["name"] == phoneNumber:
self.repo.uploadNumberALiPay( number["number"], args['repo_cate_id'], "unknown" )
break
else:
if change==0:
d.server.adb.cmd("shell", "am broadcast -a com.zunyun.zime.toast --es msg \"通讯录内没有好友\"" ).communicate()
z.sleep(10)
return
clickCondition = d(className='android.widget.ListView')
obj = clickCondition.info
obj = obj['visibleBounds']
top = int(obj['top'])
bottom = int(obj['bottom'])
y = bottom - top
d.swipe(width / 2, y, width / 2, 0)
zz = i+2
for k in range(1,10):
obj2 = d(className='android.widget.ListView').child(className='android.widget.LinearLayout', index=zz) \
.child(className='android.widget.LinearLayout').child(className='android.widget.TextView', index=0) #结束判断条件
if obj2.exists:
phone = obj2.info['text']
if phone in set1: # 结束条件,如果
if (args["time_delay"]):
for number in numbers:
if not number['name'] in set1:
self.repo.uploadNumberALiPay( number['number'], args['repo_cate_id'], "not_exist" )
z.sleep(int(args["time_delay"]))
return
else:
break
else:
zz = zz-1
continue
obj1 =d(className='android.widget.ListView').child(className='android.widget.LinearLayout', index=0) \
.child(className='android.widget.LinearLayout').child(className='android.widget.TextView', index=0)
if obj1.exists: # 实现精准滑动后有的能显示第0列的电话号码,有的显示不出来
i = 0
continue
else:
i = 1
continue
def getPluginClass():
return AlipayDepostIII
if __name__ == "__main__":
import sys
reload(sys)
sys.setdefaultencoding('utf8')
clazz = getPluginClass()
o = clazz()
d = Device("c0e5994f")
z = ZDevice("c0e5994f")
# z.toast("开始重装支付宝APP")
# z.cmd("shell", "pm uninstall com.eg.android.AlipayGphone")
# z.cmd("shell", "su -c 'rm -rf /data/data/com.eg.android.AlipayGphone'")
# z.cmd("install", "/home/zunyun/alipay.apk")
#z.server.install();
# d.server.adb.cmd("shell", "pm clear com.eg.android.AlipayGphone").communicate() # 清除缓存
# if d(textContains='今天操作太频繁了').exists: # 操作频繁,清空账号信息,重新注册
# # z.cmd("shell", "pm clear com.eg.android.AlipayGphone") # 清除缓存
# z.generateSerial()
# d.server.adb.cmd("shell", "ime set com.zunyun.qk/.ZImeService").wait()
args = {"repo_information_id":"219","contact_wait":"10","repo_cate_id":"264",'number_count':'50',"random_name":"是","clear":"是", "intervalTime": "120","time_delay": "3"}; #cate_id是仓库号,length是数量
o.action(d, z,args)
| [
"[email protected]"
] | |
644400cc50052b08c364c1f2f950b52d631c448a | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Athena/training/exercises/exercises/software_craftsmanship/fancy_math/fancy_math_solution.py | f6812946aeb6e236e271e1b69504b96c23643970 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,523 | py | """ Test Driven Development Example
Write a function called `slope` that calculates the slope
between two points. A point is specified by a two element
sequence of (x,y) coordinates.
>>> pt1 = [0.0, 0.0]
>>> pt2 = [1.0, 2.0]
>>> slope(pt1, pt2)
2.0
Use Test Driven Development. Write your tests first in
a separate file called tests_fancy_math.py.
Run your tests using the "nosetests" shell command. You can
do this by changing to the "slope" directory where your
fancy_math.py is defined and running "nosetests". From IPython,
you can run it like this:
In [1]: cd <someplace>/exercises/slope
In [2]: !nosestests
...
--------------------------------------------------
Ran 3 tests in 0.157s
If you would like to see more verbose output, use the "-v"
option:
In [3]: !nosetests -v
test_fancy_math.test_slope_xxx ... ok
test_fancy_math.test_slope_yyy ... ok
...
By default, nose captures all output and does not print it
to the screen. If you would like to see the output of print
statements, use the "-s" flag.
"""
from __future__ import division
from numpy import Inf
def slope(pt1, pt2):
dy = pt2[1] - pt1[1]
dx = pt2[0] - pt1[0]
try:
slope = dy/dx
except ZeroDivisionError:
if dy > 0:
slope = Inf
else:
slope = -Inf
return slope
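# A possible shape for the separate test file described in the docstring above
# (test names and expected values are illustrative only):
#
#     # tests_fancy_math.py
#     from numpy import Inf
#     from fancy_math import slope   # module name as used in the docstring
#
#     def test_slope_positive():
#         assert slope([0.0, 0.0], [1.0, 2.0]) == 2.0
#
#     def test_slope_vertical_up():
#         assert slope([1.0, 0.0], [1.0, 3.0]) == Inf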
| [
"[email protected]"
] | |
25c5217e5d12a8af8f78be0b6b050e94d56d9462 | 7e53ebbbe711bf6554774b96abb94354c0eb1c3a | /src/arcresthelper/portalautomation.py | a225035bccd9115825ee70423f79bd4d19618ee4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | lpsnyd/ArcREST | 0ac59f5d70a97fa603769b6bb7fa8f8f2529497a | b0366ced8cd20e59431373211aa484409c5359c0 | refs/heads/master | 2021-01-18T02:02:03.437015 | 2015-11-03T00:04:02 | 2015-11-03T00:04:02 | 45,482,870 | 0 | 0 | null | 2015-11-03T17:21:47 | 2015-11-03T17:21:47 | null | UTF-8 | Python | false | false | 24,821 | py | """
@author: ArcGIS for Utilities
@contact: [email protected]
@company: Esri
@version: 1.2
@description: Used to create reports, maps and apps
@requirements: Python 2.7.x, ArcGIS 10.2
@copyright: Esri, 2015
"""
import gc
import sys, os, datetime
import json
import csv
from arcpy import env
import publishingtools
import orgtools
import common
from securityhandlerhelper import securityhandlerhelper
try:
import solutionreporttools
from solutionreporttools import reporttools as ReportTools
from solutionreporttools import dataprep as DataPrep
reportToolsInstalled = True
except:
reportToolsInstalled = False
#----------------------------------------------------------------------
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
class portalautomation(securityhandlerhelper):
_log = None
def setLog(self,log_file):
if log_file is None:
log_file = "log.txt"
self._log = common.init_log(log_file=log_file)
def __del__(self):
if self._log is not None:
common.close_log(log_file = self._log)
#----------------------------------------------------------------------
def stageContent(self,configFiles,dateTimeFormat=None):
results = None
groups = None
items = None
group = None
content = None
contentInfo = None
startTime = None
orgTools = None
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
env.overwriteOutput = True
scriptStartTime = datetime.datetime.now()
try:
print "********************Stage Content Started********************"
print "Script started at %s" % scriptStartTime.strftime(dateTimeFormat)
if self.securityhandler.valid == False:
print "Login required"
else:
orgTools = orgtools.orgtools(securityinfo=self)
if orgTools is None:
print "Error creating org tools"
else:
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
if 'ContentItems' in config:
startTime = datetime.datetime.now()
print "Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))
contentInfo = config['ContentItems']
for cont in contentInfo:
content = cont['Content']
group = cont['ShareToGroup']
print "Sharing content to: %s" % group
if os.path.isfile(content):
with open(content, 'rb') as csvfile:
items = []
groups = []
for row in csv.DictReader(csvfile,dialect='excel'):
if cont['Type'] == "Group":
groups.append(row['id'])
elif cont['Type'] == "Items":
items.append(row['id'])
results = orgTools.shareItemsToGroup(shareToGroupName=group,items=items,groups=groups)
print "Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))
else:
print "Config file missing ContentItems section"
else:
print "Config %s not found" % configFile
except(TypeError,ValueError,AttributeError),e:
print e
except (common.ArcRestHelperError),e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
finally:
print "Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)
print "###############Stage Content Completed#################"
print ""
#if orgTools is not None:
#orgTools.dispose()
results = None
groups = None
items = None
group = None
content = None
contentInfo = None
startTime = None
orgTools = None
del results
del groups
del items
del group
del content
del contentInfo
del startTime
del orgTools
gc.collect()
#----------------------------------------------------------------------
def createGroups(self,configFiles,dateTimeFormat=None):
groupInfo = None
groupFile = None
iconPath = None
startTime = None
thumbnail = None
result = None
config = None
sciptPath = None
orgTools = None
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
env.overwriteOutput = True
scriptStartTime = datetime.datetime.now()
try:
print "********************Create Groups********************"
print "Script started at %s" % scriptStartTime.strftime(dateTimeFormat)
if self.securityhandler.valid == False:
print "Login required"
else:
orgTools = orgtools.orgtools(securityinfo=self)
if orgTools is None:
print "Error creating orgtools"
else:
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
startTime = datetime.datetime.now()
print "Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))
groupInfo = config['Groups']
groupFile = groupInfo['GroupInfo']
iconPath = groupInfo['IconPath']
if os.path.isfile(groupFile):
with open(groupFile, 'rb') as csvfile:
for row in csv.DictReader(csvfile,dialect='excel'):
if os.path.isfile(os.path.join(iconPath,row['thumbnail'])):
thumbnail = os.path.join(iconPath,row['thumbnail'])
if not os.path.isabs(thumbnail):
sciptPath = os.getcwd()
thumbnail = os.path.join(sciptPath,thumbnail)
result = orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \
isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly'],thumbnail=thumbnail)
else:
result = orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \
isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly'])
if result is None:
pass
else:
print "Group created: " + result.title
print "Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))
else:
print "Config %s not found" % configFile
except(TypeError,ValueError,AttributeError),e:
print e
except (common.ArcRestHelperError),e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
finally:
print "Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)
print "###############Create Groups Completed#################"
print ""
#if orgTools is not None:
#orgTools.dispose()
groupInfo = None
groupFile = None
iconPath = None
startTime = None
thumbnail = None
result = None
config = None
sciptPath = None
orgTools = None
del groupInfo
del groupFile
del iconPath
del startTime
del thumbnail
del result
del config
del sciptPath
del orgTools
gc.collect()
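    # Sketch of the GroupInfo CSV this method expects; the column names come from
    # the row[...] lookups above, the example values are placeholders only:
    #
    #   title,description,tags,snippet,phone,access,sortField,sortOrder,isViewOnly,isInvitationOnly,thumbnail
    #   Field Crews,Example group,gis;crews,Example snippet,,org,title,asc,False,True,crew.png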
#----------------------------------------------------------------------
def publishfromconfig(self,configFiles,combinedApp=None,dateTimeFormat=None):
publishTools = None
webmaps = None
config = None
resultsItems = None
resultFS = None
resultMaps = None
resultApps = None
combinedResults = None
if dateTimeFormat is None:
dateTimeFormat = '%Y-%m-%d %H:%M'
env.overwriteOutput = True
scriptStartTime = datetime.datetime.now()
try:
webmaps = []
print "********************Script Started********************"
print "Script started at %s" % scriptStartTime.strftime(dateTimeFormat)
# start report processing (moved out from under ArcREST logic. no AGO crednetials needed to run reports)
for configFile in configFiles:
config = common.init_config_json(config_file=configFile)
if config is not None:
if 'ReportDetails' in config:
if reportToolsInstalled == False:
print "Report section is included in the config file but the solutionreporttools cannot be located"
else:
reportConfig = config['ReportDetails']
# This code checks to see if you want to export the data from SDE to a local GDB. The parameter is set in config file.
# Could be performance gain to run locally. If you choose this option, both the report and the data prep in memory config
# are modified so they can point to the local temp location.
if 'RunReport' in reportConfig and (str(reportConfig['RunReport']).upper() =="TRUE" or str(reportConfig['RunReport']).upper() =="YES"):
reportConfig = ReportTools.reportDataPrep(reportConfig)
print "-----Report Section Starting-----"
startTime = datetime.datetime.now()
print "Processing reports in config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))
ReportTools.create_report_layers_using_config(config=reportConfig)
print "Reports in config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))
print "-----Report Section Complete-----"
if 'PublishingDetails' in config:
publishingConfig = config['PublishingDetails']
if 'PublishData' in publishingConfig:
publishData = publishingConfig['PublishData']
else:
print "PublishingDetails is missing the PublishData parameter: type string, values, True or False"
publishData = 'TRUE'
if (str(publishData).upper() =="TRUE" or str(publishData).upper() =="YES"):
print " "
print "-----Publishing Section Starting-----"
startTime = datetime.datetime.now()
print "Processing publishing in config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))
publishTools = publishingtools.publishingtools(securityinfo=self)
if publishTools.valid == False :
print "Error creating publishing tools: %s" % publishTools.message
else:
print "Publishing tools created: %s" % publishTools.message
resultFS = []
if 'Items' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Creating Items: %s" % str(startSectTime.strftime(dateTimeFormat))
resultsItems = publishTools.publishItems(items_info=publishingConfig['Items'])
print "Items created, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
if 'FeatureCollections' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Creating Feature Collection: %s" % str(startSectTime.strftime(dateTimeFormat))
resultFS = publishTools.publishFeatureCollections(configs=publishingConfig['FeatureCollections'])
print "Feature Collection published, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
if 'FeatureServices' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Creating Feature Services: %s" % str(startSectTime.strftime(dateTimeFormat))
resultFS = resultFS + publishTools.publishFsFromMXD(fs_config=publishingConfig['FeatureServices'])
print "Feature Services published, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
if 'ExistingServices' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Updating Existing Feature Services: %s" % str(startSectTime.strftime(dateTimeFormat))
resultES = publishTools.updateFeatureService(efs_config=publishingConfig['ExistingServices'])
print "Updating Existing Feature Services completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
if 'MapDetails' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Creating maps: %s" % str(startSectTime.strftime(dateTimeFormat))
resultMaps = publishTools.publishMap(maps_info=publishingConfig['MapDetails'],fsInfo=resultFS,itInfo=resultsItems)
for maps in resultMaps:
if 'MapInfo' in maps:
if 'Results' in maps['MapInfo']:
if 'itemId' in maps['MapInfo']['Results']:
webmaps.append(maps['MapInfo']['Results']['itemId'])
print "Creating maps completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
if 'AppDetails' in publishingConfig:
startSectTime = datetime.datetime.now()
print " "
print "Creating apps: %s" % str(startSectTime.strftime(dateTimeFormat))
resultApps = publishTools.publishApp(app_info=publishingConfig['AppDetails'],map_info=resultMaps)
print "Creating apps completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
print "Publishing complete in config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))
print "-----Publishing Section Complete-----"
else:
print "Config %s not found" % configFile
if combinedApp:
if os.path.exists(combinedApp):
print " "
startSectTime = datetime.datetime.now()
print "Creating combined result: %s" % str(startSectTime.strftime(dateTimeFormat))
config = common.init_config_json(config_file=combinedApp)
combinedResults = publishTools.publishCombinedWebMap(maps_info=config['PublishingDetails']['MapDetails'],webmaps=webmaps)
if 'PublishingDetails' in config:
publishingConfig = config['PublishingDetails']
if 'PublishData' in publishingConfig:
publishData = publishingConfig['PublishData']
else:
print "PublishingDetails is missing the PublishData parameter: type string, values, True or False"
publishData = 'TRUE'
if (str(publishData).upper() =="TRUE" or str(publishData).upper() =="YES"):
if 'AppDetails' in publishingConfig:
resultApps = publishTools.publishApp(app_info=publishingConfig['AppDetails'],map_info=combinedResults)
print "Creating combind result completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime)
except(TypeError,ValueError,AttributeError),e:
print e
except (common.ArcRestHelperError),e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except Exception as e:
if (reportToolsInstalled):
if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
else:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
finally:
print "Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)
print "###############Script Completed#################"
print ""
if publishTools is not None:
publishTools.dispose()
publishTools = None
webmaps = None
config = None
resultFS = None
resultsItems = None
resultMaps = None
resultApps = None
combinedResults = None
del publishTools
del webmaps
del config
del resultFS
del resultMaps
del resultApps
del combinedResults
del resultsItems
gc.collect()
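    # Rough shape of a config consumed by publishfromconfig; the keys are inferred
    # from this method, and the section contents are placeholders that depend on
    # the solution being deployed:
    #
    #   {
    #     "ReportDetails":     { "RunReport": "True", ... },
    #     "PublishingDetails": {
    #       "PublishData": "True",
    #       "Items": [...], "FeatureCollections": [...], "FeatureServices": [...],
    #       "ExistingServices": [...], "MapDetails": [...], "AppDetails": [...]
    #     }
    #   }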
| [
"[email protected]"
] | |
387e099c4bd771eae8b41a1b0168202680e46074 | 31747dd8c61085421d7bd4166f7bd4f9429cf914 | /tests/test_visitors/test_tokenize/test_keywords/test_starts_with_dot.py | 71c475e0052d8cbb4394f153df8ed6fd363f4019 | [
"MIT"
] | permissive | edytagarbarz/wemake-python-styleguide | 0e9ed4080a13a6727b8e80785e113b8407409352 | 74b86156d73c2a4fe9c755138f6953fec41fab3b | refs/heads/master | 2021-03-03T19:21:54.807089 | 2020-03-07T23:35:15 | 2020-03-07T23:35:15 | 245,981,718 | 1 | 1 | MIT | 2020-03-09T08:31:55 | 2020-03-09T08:31:54 | null | UTF-8 | Python | false | false | 1,483 | py | import pytest
from wemake_python_styleguide.violations.consistency import (
LineStartsWithDotViolation,
)
from wemake_python_styleguide.visitors.tokenize.syntax import (
WrongKeywordTokenVisitor,
)
# Correct:
correct_dot_attr = """
some_line = some.attr(
some.other,
)
"""
correct_elipsis = """
first[
1,
...,
]
"""
correct_string_dot = '".start!"'
# Wrong:
wrong_dot_start1 = """
some = (
MyModel.objects.filter(some=1)
.exclude(other=2)
)
"""
wrong_dot_start2 = """
some = (
MyModel.objects.filter(some=1)
.exclude(other=2)
)
"""
@pytest.mark.parametrize('code', [
wrong_dot_start1,
wrong_dot_start2,
])
def test_wrong_dot_start(
parse_tokens,
assert_errors,
default_options,
code,
):
"""Ensures that lines cannot be started with ``.`` char."""
file_tokens = parse_tokens(code)
visitor = WrongKeywordTokenVisitor(
default_options, file_tokens=file_tokens,
)
visitor.run()
assert_errors(visitor, [LineStartsWithDotViolation])
@pytest.mark.parametrize('code', [
correct_dot_attr,
correct_elipsis,
correct_string_dot,
])
def test_correct_dot_start(
parse_tokens,
assert_errors,
default_options,
code,
):
"""Ensures that lines can be started with other chars."""
file_tokens = parse_tokens(code)
visitor = WrongKeywordTokenVisitor(
default_options, file_tokens=file_tokens,
)
visitor.run()
assert_errors(visitor, [])
| [
"[email protected]"
] | |
14b359d92f3e1da24aa1a431953c9de91141cae3 | 449f6888bff99d7e4fd86fa6ffa6b3316084e34e | /Solutions/248.py | febd366352a6f0c6d1e9f9292f83bc6b4f1906f7 | [
"MIT"
] | permissive | All3yp/Daily-Coding-Problem-Solutions | e94679a5858b8a83ffe58d14b824fe80de21a694 | 199b9606474edb45bd14b20b511b691ada437586 | refs/heads/master | 2023-03-18T21:06:30.675503 | 2021-03-13T03:52:31 | 2021-03-13T03:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | """
Problem:
Find the maximum of two numbers without using any if-else statements, branching, or
direct comparisons.
"""
def get_max(num1: int, num2: int) -> int:
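    # -(num1 < num2) is 0 when num1 >= num2 and -1 (all bits set) when num1 < num2,
    # so the mask keeps either nothing or (num1 ^ num2); XOR-ing that back into
    # num1 therefore yields num1 or num2 respectively.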
return num1 ^ ((num1 ^ num2) & -(num1 < num2))
if __name__ == "__main__":
print(get_max(1, 5))
print(get_max(4, 3))
print(get_max(-3, 6))
print(get_max(5, -4))
print(get_max(-4, -2))
print(get_max(-3, -6))
"""
SPECS:
TIME COMPLEXITY: O(1)
SPACE COMPLEXITY: O(1)
"""
| [
"[email protected]"
] | |
7af9d20303a6cb0534c7e8fa34538d9028d47d3a | e40381a0aa3320616e5a5b82533c2c5cfe0fa2ce | /Dark_Scripts/plot_ValidationScores-LoopSeeds.py | 40dc9786c4c96c2cd21b3edd21e4a9bd9de57c82 | [
"MIT"
] | permissive | zmlabe/predictGMSTrate | 7220b26f86839699635fe2f04e45348095183bc7 | ac4238c7f1c33dc9d30382e4dbdc26a2f63352f5 | refs/heads/main | 2023-04-10T03:46:16.053123 | 2023-01-11T14:08:27 | 2023-01-11T14:08:27 | 396,942,451 | 4 | 3 | MIT | 2022-01-19T22:15:19 | 2021-08-16T19:30:55 | Python | UTF-8 | Python | false | false | 5,612 | py | """
Create plots to show validation scores for different seeds
Author : Zachary M. Labe
Date : 27 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as c
import numpy as np
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
### Hyperparamters for files of the ANN model
rm_ensemble_mean = True
COUNTER = 100
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/LoopSeeds/'
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Scores/'
### Read in seeds
seeds = np.load(directorydata + 'LoopSeedsResultsfor_ANNv2_OHC100_hiatus_EnsembleMeanRemoved_SEEDS.npz')
random_segment_seedq = seeds['random_segment_seedall']
random_network_seedq = seeds['random_network_seedall']
accval = np.empty((COUNTER))
precval = np.empty((COUNTER))
recallval = np.empty((COUNTER))
f1val = np.empty((COUNTER))
for lo in range(COUNTER):
if rm_ensemble_mean == True:
vari_predict = ['OHC100']
fac = 0.7
random_segment_seed = random_segment_seedq[lo]
random_network_seed = random_network_seedq[lo]
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = np.arange(0.1,1.2,0.1)
yearsall = np.arange(1990,2099+1,1)
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
savename = 'LoopSeedsResultsfor_ANNv2_'+vari_predict[0]+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
scores = np.load(directorydata + savename + '_SCORES_%s.npz' % lo)
accval[lo] = scores['accval']
precval[lo] = scores['precval']
recallval[lo] = scores['recallval']
f1val[lo] = scores['f1_val']
### Gather data and place percent
alldata = np.asarray([accval,precval,recallval,f1val]) * 100
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Graph for scores
fig = plt.figure()
ax = plt.subplot(111)
plotdata = alldata.transpose()
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
ax.tick_params(axis="x",which="both",bottom = False,top=False,
labelbottom=False)
ax.yaxis.grid(zorder=1,color='darkgrey',alpha=0.7,clip_on=False,linewidth=0.5)
def set_box_color(bp, color):
plt.setp(bp['boxes'],color=color)
plt.setp(bp['whiskers'], color=color,linewidth=1.5)
plt.setp(bp['caps'], color='w',alpha=0)
plt.setp(bp['medians'], color='w',linewidth=1.5)
positionsq = np.array(range(alldata.shape[0]))
bpl = plt.boxplot(plotdata,positions=positionsq,widths=0.6,
patch_artist=True,sym='')
# Modify boxes
cp= 'maroon'
set_box_color(bpl,cp)
plt.plot([], c=cp, label=r'\textbf{VALIDATION}',clip_on=False)
leg = plt.legend(shadow=False,fontsize=11,loc='upper center',
bbox_to_anchor=(0.5,1.14),fancybox=True,ncol=4,frameon=False,
handlelength=5,handletextpad=1)
for line,text in zip(leg.get_lines(), leg.get_texts()):
text.set_color(line.get_color())
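### Overlay each seed's individual score as a jittered point so the spread behind the boxplots stays visible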
for i in range(plotdata.shape[1]):
y = plotdata[:,i]
x = np.random.normal(positionsq[i], 0.04, size=len(y))
plt.plot(x, y,color='teal', alpha=0.5,zorder=10,marker='.',linewidth=0,markersize=5,markeredgewidth=0,clip_on=False)
plt.yticks(np.arange(0,101,10),list(map(str,np.round(np.arange(0,101,10),2))),
fontsize=6)
plt.ylim([10,90])
plt.text(-0.3,3,r'\textbf{ACCURACY}',fontsize=10,color='dimgrey',
ha='left',va='center')
plt.text(1.,3,r'\textbf{PRECISION}',fontsize=10,color='dimgrey',
ha='center',va='center')
plt.text(2.2,3,r'\textbf{RECALL}',fontsize=10,color='dimgrey',
ha='right',va='center')
plt.text(3.27,3,r'\textbf{F1-SCORE}',fontsize=10,color='dimgrey',
ha='right',va='center')
plt.ylabel(r'\textbf{Score [\%]}',color='k',fontsize=10)
if rm_ensemble_mean == True:
plt.savefig(directoryfigure + 'ValidationScores-LoopSeeds_Hiatus_EDA-v2_rmENSEMBLEmean.png',dpi=300)
else:
plt.savefig(directoryfigure + 'ValidationScores-LoopSeeds_Hiatus_EDA-v2.png',dpi=300) | [
"[email protected]"
] | |
d87d2fd7a1df348093b2c383f2a073227f39de42 | 4e691a59c67915d5e2cc5a367137dfb02894f4cc | /main/migrations/0005_auto_20191118_1131.py | f8539f0214cc916cee8f0cc19c14e6206fb9725e | [] | no_license | antocaroca/clase-4 | fd7395e25debfa807fde2c5823b968747c5d9222 | b88b78b022102156ba01cd4804307fafd3c6966b | refs/heads/master | 2022-04-30T14:48:03.361465 | 2019-11-23T13:22:33 | 2019-11-23T13:22:33 | 223,596,303 | 0 | 0 | null | 2022-04-22T22:53:32 | 2019-11-23T13:51:00 | Python | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.2.7 on 2019-11-18 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_activemanager'),
]
operations = [
migrations.AlterField(
model_name='productag',
name='products',
field=models.ManyToManyField(blank=True, to='main.Producto'),
),
]
| [
"[email protected]"
] | |
ee912e4cffffee0781c744461002eba138ced516 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj9-10_233719.19+062504.0/sdB_sdssj9-10_233719.19+062504.0_lc.py | 0f09a4e3130e72011e6fee900f36b7ea6ab8c394 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[354.329958,6.417778], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj9-10_233719.19+062504.0/sdB_sdssj9-10_233719.19+062504.0_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1c6f272ebd1ff04f050865ce0bd785f5e92cc25c | af9268e1ead8cdb491868c14a2240d9e44fb3b56 | /last-minute-env/lib/python2.7/site-packages/django/db/migrations/executor.py | a19af992f2af78ad965ede2ad59e5a039ef25855 | [] | no_license | frosqh/Cousinade2017 | d5154c24c93ca8089eeba26b53c594e92cb6bd82 | c34d5707af02402bf2bb7405eddc91297da399ff | refs/heads/master | 2021-01-20T07:57:34.586476 | 2017-10-22T18:42:45 | 2017-10-22T18:42:45 | 90,074,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,835 | py | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations, router
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def _create_project_state(self, with_applied_migrations=False):
"""
Create a project state including all the applications without
migrations and applied migrations if with_applied_migrations=True.
"""
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
return state
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
if state is None:
# The resulting state should include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
if state is None:
# The resulting state should still include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from these sets so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in reverse order they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key] for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
# The old state keeps as-is, we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(atomic=migration.atomic) as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
We do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, so as to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but we still want to correctly maintain the applied state
of the squash migration.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy or not model._meta.managed or not
router.allow_migrate(
self.connection.alias, migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
existing_table_names = self.connection.introspection.table_names(self.connection.cursor())
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
if model._meta.db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
if field.remote_field.through._meta.db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
column_names = [
column.name for column in
self.connection.introspection.get_table_description(self.connection.cursor(), table)
]
if field.column not in column_names:
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
| [
"[email protected]"
] | |
a715fbf63f7649d806c5d3fabc06da2a4ecab666 | cc44edfa1edbedea3ad044805be7548e0ccba70d | /0x01-python-if_else_loops_functions/4-print_hexa.py | 6306e03ac91e63adc4b344e653568933bf78f701 | [] | no_license | set808/holbertonschool-higher_level_programming | 421f0da1f91cd56eb2daa4e07a51b4a505d53edc | eb276a4e68e5cc43498459eec78fc05f72e2cd48 | refs/heads/master | 2020-03-09T13:07:43.824914 | 2018-09-08T00:26:46 | 2018-09-08T00:26:46 | 128,802,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | #!/usr/bin/python3
for i in range(0, 99):
print('{:d} = {:#02x}'.format(i, i))
| [
"[email protected]"
] | |
880d0dabd65d48c9ab7140cf7942a975a4dc87e3 | f5e7882e9fa8dca9b49d74819918063963eaf515 | /FILES/prob2.py | 09b48eba87e8bfcd175f537d161d9ee3a423e5a3 | [] | no_license | marri88/python-base | 66ede5e3da3bce92e2661fabf8a2119644dd5ab3 | 00002724412f4081ee6d1b91c22cb1ccb5ed92fe | refs/heads/master | 2023-06-09T16:13:08.921614 | 2021-07-02T09:16:38 | 2021-07-02T09:16:38 | 382,293,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # -week_2:Работа С файлами-
#####################################################################################
# first picture: prob №2
# Создайте файл users.txt.
# Напишите программу которая спрашивает у пользователя его Логин и Пароль и записывает в файл users.txt.
r = open('/home/aimira/python/python3/week2files/users.txt', 'w')
a = input("name: ")
b = input("password: ")
r.write(f"name: {a}, password: {b}")
r.close
with open('users.txt', 'r') as c:
print(c.read()) | [
"[email protected]"
] | |
86550fb12d249da7e19769464f8dd19eee7951c5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_sharpening.py | 511f6993ea47ed80907f9f44fb3fcbe24158ade6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _SHARPENING():
    def __init__(self,):
        self.name = "SHARPENING"
        self.definitions = ['sharpen']  # original referenced an undefined name; assumed to mirror self.basic
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['sharpen']
| [
"[email protected]"
] | |
73418a4efc33f77e2110c61b4fae9a8d028f2537 | 14a26103e97b1669ca6f1f44996c4ad65b9f0179 | /bim1/week4/16.6.5.py | 714f4bcd5a24a3f05a9b49443eec7b91589b1e9e | [] | no_license | igorbragaia/CES-22 | 6ea0fc3da7ba2274080954071f0070ba78a7f1f4 | 7c09bdec315421e57f2cd44d50f919f3965144ac | refs/heads/master | 2021-09-17T14:40:31.426602 | 2018-07-02T18:54:40 | 2018-07-02T18:54:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | import sys
def test(did_pass):
"""
Prints test result
:param did_pass: test result
:return:
"""
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
class Point:
""" Create a new Point, at coordinates x, y """
def __init__(self, x=0, y=0):
""" Create a new point at x, y """
self.x = x
self.y = y
class Rectangle:
""" A class to manufacture rectangle objects"""
def __init__(self, posn, w, h):
""" Initiate rectangle at posn Point, with width w, height h"""
self.corner = posn
self.width = w
self.height = h
def collide(self, rect2):
"""
Checks if current rect and rect2 collides
:param rect2: Rectangle object
:return: boolean
"""
return (self.pointInsideCheck(rect2.corner) or
self.pointInsideCheck(Point(rect2.corner.x + rect2.height, rect2.corner.y)) or
self.pointInsideCheck(Point(rect2.corner.x, rect2.corner.y + rect2.width)) or
self.pointInsideCheck(Point(rect2.corner.x + rect2.height, rect2.corner.y + rect2.width)))
def pointInsideCheck(self, point):
"""
checks if a point is inside current rect
:param point: Point object
:return: boolean
"""
return (point.y >= self.corner.y and point.y <= self.corner.y + self.width and
point.x >= self.corner.x and point.x <= self.corner.x + self.height)
def __str__(self):
return "({0}, {1}, {2})".format(self.corner, self.width, self.height)
print(Rectangle(Point(0, 0), 100, 200).collide(Rectangle(Point(100, 101), 5, 10)))
print(Rectangle(Point(0, 0), 100, 200).collide(Rectangle(Point(100, 99), 5, 10)))
| [
"[email protected]"
] | |
3d4a9d8593553e43461522b4f38e4009058c4b7f | 46355bd117d38191f2ebd23e9250ab121bf839fc | /Airbnb/roundPrice.py | dfa2edbe6aa3ad782e2593c4a64dd5308c29feb7 | [] | no_license | Jason003/Interview_Code_Python | f1e1a59e87cfada78a931be6a27a51898442aca4 | 75dbc8d3906bd00c8129c8ed0b584794c8b41d6b | refs/heads/master | 2020-08-12T20:11:56.454848 | 2019-11-12T01:36:52 | 2019-11-12T01:36:52 | 214,835,963 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | import math
def roundPrice(A):
    # attention: in python3, round() uses banker's rounding (e.g. round(2.5) == 2), so define half-up rounding explicitly
def round(x):
fac = x - math.floor(x)
return math.ceil(x) if fac >= 0.5 else math.floor(x)
if not A:
return A
roundSum = sum(map(round, A))
sumRound = round(sum(A))
print(roundSum)
print(sumRound)
res = [round(a) for a in A]
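    # The per-element half-up roundings may not add up to round(sum(A)); repair the
    # mismatch by flipping the rounding direction of the entries whose fractional part
    # is closest to the 0.5 boundary (a largest-remainder style adjustment), so each
    # value still changes by less than one unit.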
if roundSum == sumRound:
return res
elif roundSum > sumRound:
cnt = roundSum - sumRound # need to make cnt number to round(number) - 1
nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)])
for fac, a, i in nums:
if fac >= 0.5 and cnt > 0:
res[i] = math.floor(a)
cnt -= 1
else:
res[i] = round(a)
return res
else:
cnt = sumRound - roundSum # need to make cnt number to round(number) + 1
nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)])[::-1]
for fac, a, i in nums:
if fac < 0.5 and cnt > 0:
res[i] = math.ceil(a)
cnt -= 1
else:
res[i] = round(a)
return res
print(roundPrice([1,2,3,4])) | [
"[email protected]"
] | |
e38b81ecb5dcb797e6cabf948bf96fed37fc0bb9 | 22bf086e3e7d43b72f0d05aaa3359b766a688a79 | /scripts/extract_clues_info.py | 5b22dcb47717b771f546fc7cd1ccda68132dab69 | [] | no_license | kaspermunch/humanXsweeps | aa7a4e6a175be276713f17f79d7179a5dd644ff5 | 3a2c4aa496aaffa837eb15dd3d382f7712266f38 | refs/heads/master | 2023-04-07T13:36:11.619973 | 2023-03-18T08:05:18 | 2023-03-18T08:05:18 | 161,376,285 | 1 | 0 | null | 2023-01-11T14:12:39 | 2018-12-11T18:25:53 | Jupyter Notebook | UTF-8 | Python | false | false | 886 | py |
import sys
import re
import os
import pandas as pd
import h5py
_, output_file_name, steps_dir, *clues_file_names = sys.argv # pylint: disable=unbalanced-tuple-unpacking
# open output file:
output_file = open(output_file_name, 'w')
# loop over base names of
for clues_file_name in clues_file_names:
# 98000000_99500000_1.bed_98614821.h5
start, end, chain, pos = re.search(r'(\d+)_(\d+)_(\d+).bed_(\d+).h5', clues_file_name).groups()
h5_path = os.path.join(steps_dir, clues_file_name)
if os.path.getsize(h5_path) == 0:
log_likelihood_ratio = 'NA'
selection_coef = 'NA'
else:
h5 = h5py.File(h5_path, 'r')
log_likelihood_ratio = h5['logLikelihoodRatios'][h5.attrs['iHat'], h5.attrs['jHat']]
selection_coef = h5.attrs['sHat']
print(start, end, pos, chain, log_likelihood_ratio, selection_coef, sep='\t', file=output_file)
| [
"[email protected]"
] | |
71ba6aabb8bb089ad893dcdffa33d9eec54dcd76 | 65dc8b306c1a22dc3a8ebf53d399135c5b894b69 | /guestbook/forms.py | 7936a074493597f70ce0116429f3fece381a3b2e | [] | no_license | tokibito/python-hackathon2-guestbook | 49e7f144d2c4f9920abea639816645d0edbca292 | dfa152cf6cb7ebea1779c63846e36f8dbb90d8c1 | refs/heads/master | 2020-07-25T08:20:37.459468 | 2009-11-13T04:53:40 | 2009-11-13T04:53:40 | 208,228,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # coding: utf8
from django import forms
from guestbook.models import Greeting
class GreetingForm(forms.ModelForm):
"""
ゲストブックの書き込みフォーム
モデルを元に生成する
"""
class Meta:
model = Greeting
        # Exclude the post timestamp
exclude = ('create_at',)
| [
"[email protected]"
] | |
b006bdd2968ab6b20d0f4cebef10dca4504e7561 | b8ea631aae5d132c7b0236684d5f7c12d3c222be | /ABC/ABC_114C_zentan.py | 6204c22fc19b6d2aae27bac78f48b889f8173b21 | [] | no_license | Ryushi-tech/card3 | 68c429313142e58d4722a1cd5a4acc4ab39ca41f | 883636b2f518e38343a12816c5c641b60a87c098 | refs/heads/master | 2021-07-05T22:46:33.089945 | 2020-12-12T15:31:00 | 2020-12-12T15:31:00 | 209,176,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | import itertools as it
n = int(input())
l = len(str(n))
res = []
for i in range(3, l + 1):
for gen in it.product("357", repeat=i):
gen_s = "".join(gen)
if int(gen_s) <= n:
res.append(gen_s)
cnt = 0
for r in res:
if all(r.count(c) for c in "357"):
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
93d9228177ee76f0b5d92291e7212790f6c0557e | 6a0e51fc8d2ea8819711acb7948f4c21c277c771 | /the_list.py | bf381ad611c0d6716c2dc2a1241e1b868e580752 | [] | no_license | sunzhiyan/Python | eb0bd60edbc85e670ad02af9009df53f5936bff3 | 3881d124b71f81b7e0bd1e70bcd336238fbcfa3f | refs/heads/master | 2022-10-08T21:38:22.799102 | 2021-03-05T02:32:57 | 2021-03-05T02:32:57 | 244,139,633 | 1 | 0 | null | 2022-09-23T22:42:18 | 2020-03-01T11:50:40 | Python | UTF-8 | Python | false | false | 321 | py | # -*- encoding: utf-8 -*-
'''
@File : 列表.py
@Time : 2020/03/03 22:09:13
@Author : xdbcb8
@Version : 1.0
@Contact : [email protected]
@WebSite : www.xdbcb8.com
'''
# here put the import lib
list=[56,23,89,99,56,45,87,56,65,100]
print(max(list))
print(min(list))
print(sum(list))
print(sum(list)/len(list)) | [
"[email protected]"
] | |
38b3cfd6f2bac9bca72f1b1a81f348801111e789 | 00cb405170a6a9572bef0ec8f373813eada08c03 | /Game Structure/geometry/version5/mypainter.py | ce5e1b632d327bbb6cd9614946dd573bfe5136dd | [] | no_license | MarcPartensky/Python-Games | c0ad2857be5832d6029642bb0a96bc8e403a12e3 | ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed | refs/heads/master | 2022-09-03T00:04:16.402288 | 2022-08-12T17:10:22 | 2022-08-12T17:10:22 | 166,606,022 | 2 | 1 | null | 2021-03-07T16:20:15 | 2019-01-19T23:56:04 | Python | UTF-8 | Python | false | false | 7,413 | py | from mycase import Case
from myabstract import Point, Form
import mycolors
import numpy as np
import shelve
from pygame.locals import *
class Painter:
def __init__(self, *args, **kwargs):
"""Create a painter."""
self.paints = [Board(), Paint(*args, **kwargs)]
self.paint_brush = PaintBrush()
self.painting = 0
def __call__(self, surface):
"""Main loop of the painter."""
while surface.open:
surface.check()
surface.control()
surface.clear()
surface.show()
self.show(surface)
self.control(surface)
surface.flip()
def control(self, surface):
"""Control the painter."""
cursor = surface.point()
cursor = [round(c + 1 / 2) for c in cursor]
self.print(surface)
self.paint(surface)
def print(self, surface):
"""Print the state of the painter on the surface."""
if self.painting == None:
surface.print("Create a new painting.", [-10, 12])
def paint(self, surface):
"""Paint using the surface and the paint."""
keys = surface.press()
click = surface.click()
cursor = surface.point()
cursor = [round(c + 1 / 2) for c in cursor]
self.paint_brush.setPosition(cursor)
p = self.getPaint(cursor)
if p is not None:
c = self.paints[p].getCase(cursor)
if keys[K_r]:
self.paint_brush.setRandomColor()
if keys[K_a]:
self.paint_brush.lightenColor()
if keys[K_b]:
self.paint_brush.darkenColor()
if keys[K_f]:
self.refreshBoard()
if p is None:
if click:
self.createPaint(cursor)
return
if keys[K_s]:
self.save(self.paints[p])
if keys[K_l]:
self.load(p)
if c is None:
return
if keys[K_c]:
self.paint_brush.copyColor(self.paints[p].cases[c])
if not click:
return
self.paint_brush.paint(surface, self.paints[p], c)
def createPaint(self, position):
"""Create a paint."""
size = [20, 20]
self.paints.append(Paint(position, size))
def save(self, paint):
"""Save the paint."""
print("File saved")
with shelve.open('paints') as p:
p["test"] = paint
def load(self, p):
"""Load a paint."""
print("File loaded")
with shelve.open("paints") as paints:
paint = paints["test"]
self.paints[p] = paint
def refreshBoard(self):
"""Change the colors of the board."""
self.paints[0].generate()
def show(self, surface):
"""Show the paints of the painter."""
for paint in self.paints:
paint.show(surface)
self.paint_brush.show(surface)
def getPaint(self, position):
"""Return the case containing the position if there is one."""
for i in range(len(self.paints)):
if position in self.paints[i]:
return i
class PaintBrush:
def __init__(self, position=[0, 0], size=[1, 1], color=mycolors.GREEN):
"""Create a paint brush for the painter."""
self.position = position
self.size = size
self.color = color
def paint(self, surface, paint, c):
"""Color a case."""
paint.cases[c].color = self.color
def copyColor(self, case):
"""Copy the color of the case."""
self.color = case.color
def setRandomColor(self):
"""Set the color of the brush to a random color."""
self.color = mycolors.random()
    def lightenColor(self):
        """Lighten the brush."""
        self.color = mycolors.lighten(self.color)
    def darkenColor(self):
        """Darken the color."""
        self.color = mycolors.darken(self.color)
def setPosition(self, position):
"""Set the position of the brush."""
self.position = position
def show(self, surface):
"""Show the paint brush on the surface."""
x, y = self.position
case = Case((x - 1, y - 1), size=self.size, color=self.color)
case.show(surface, fill=False, side_color=mycolors.RED)
class Paint:
"""Paint object reserves an area to draw objects in."""
@classmethod
def random(cls, position=[0, 0], size=[10, 10]):
"""Create a random paint."""
return cls(position, size)
def __init__(self, position=[0, 0], size=[10, 10]):
"""Create a board object."""
self.position = position
self.size = size
self.cases = []
self.generate()
def getCorners(self):
"""Return the corners of the paint."""
px, py = self.position
sx, sy = self.size
corners = (px, py, px + sx, py + sy)
return corners
def generate(self):
"""Generate random cases all over the paint."""
cases = []
xmin, ymin, xmax, ymax = self.getCorners()
for y in np.arange(ymin, ymax):
for x in np.arange(xmin, xmax):
case = Case([float(x), float(y)], color=mycolors.WHITE)
cases.append(case)
self.cases = cases
def __contains__(self, position):
"""Determine if the point is in the paint."""
x, y = position
xmin, ymin, xmax, ymax = self.getCorners()
return (xmin <= x <= xmax) and (ymin <= ymax)
def getCase(self, position):
"""Return the case containing the position if there is one."""
for i in range(len(self.cases)):
if position in self.cases[i]:
return i
def getForm(self):
"""Return the form corresponding to the area of the painting."""
xmin, ymin, xmax, ymax = self.getCorners()
ps = [Point(xmin, ymin), Point(xmax, ymin),
Point(xmax, ymax), Point(xmin, ymax)]
return Form(ps)
def show(self, surface):
"""Show the paint by showing all its cases."""
f = self.getForm()
for case in self.cases:
case.show(surface, side_color=mycolors.WHITE)
f.side_color = mycolors.WHITE
f.side_width = 3
f.show(surface)
def save(self):
"""Save the paint."""
with shelve.open('paints') as paints:
            paints['test'] = self
class Board(Paint):
def __init__(self):
"""Create an accesory for the painter."""
self.position = [-12, -10]
self.size = [1, 20]
self.generate()
def generate(self):
"""Generate random cases for the board."""
x, y = self.position
sx, sy = self.size
self.cases = [Case([x, y - sy // 2], color=mycolors.random())
for y in range(sy)]
def show(self, surface):
"""Show the paint by showing all its cases."""
f = self.getForm()
for case in self.cases:
case.show(surface, side_color=mycolors.BLACK)
f.side_color = mycolors.BLACK
f.side_width = 3
f.show(surface)
f[0].showText(surface, "Board")
if __name__ == "__main__":
from mycontext import Surface
from myzone import Zone
surface = Surface(name="Painter")
painter = Painter([0, 0], [8, 8])
#print([0,0] in painter.paints[0])
painter(surface)
| [
"[email protected]"
] | |
270742e36424951d8024a33594d64497bf5758e2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/331/usersdata/293/94058/submittedfiles/funcoes1.py | cc9f91b72aa5433b90fafe6a6f90c9742a3cf68e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
def crescente(n, lista_crescente):
    # write the code of the crescente function here
    cont_crescente = 0
    for i in range(0, n-1, 1):
        if lista_crescente[i] < lista_crescente[i+1]:
            cont_crescente = cont_crescente + 1
    if cont_crescente == len(lista_crescente) - 1:
        return ("S")
    else:
        return ("N")
# write the remaining functions
# write the main program
print(crescente(6, [1, 2, 3, 4, 5, 6]))
| [
"[email protected]"
] | |
1fa310f9ae1a6793e62ea6ef82a206a3635c31df | d74daa1dfe1f4eac96ceb1d006c59ba19b55d37a | /CS401/GG-Project/GG-Project/productProcessers/ProductReader.py | e6a04f2b514a3650ae352f1cd5ba9b37d87b4018 | [] | no_license | muratcancicek/Assignment-Projects | 7aac0cced54f392e26b39f6bc46af813faddd628 | 41c7df2b60f20eb840d409f3fedb4ec6feeafdcc | refs/heads/master | 2021-06-06T17:27:11.616251 | 2017-09-06T12:17:40 | 2017-09-06T12:17:40 | 58,016,251 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,422 | py | from MainSrc.PythonVersionHandler import *
from .BsonIO import *
from paths import *
def getCategories(filename):
categoriesData = readBson("categories.bson")
categories = categoriesData['data']['categories']
print_(len(categories))
deepestCategories = [c for c in categories if c['deepest'] == 'Y']
print_(len(deepestCategories))
sortedCategories = sorted(deepestCategories, key=lambda k: k['productCount'], reverse = True)
#print sortedCategories[0]['productCount']
return sortedCategories, [c['categoryCode'] for c in sortedCategories]
def getCrowdedCategories(filename = "categories.bson", n = 100):
    sortedCategories, _ = getCategories(filename)
finalCategories = sortedCategories[:n]
return finalCategories, [c['categoryCode'] for c in finalCategories]
def getCategoryCodes(codes):
line = '('
for code in codes[:-1]:
line += '\"' + code + '\", '
line += '\"' + codes[-1] + '\")'
print_(line)
def readCodeLines():
codesText = open('codes.txt', "rb")
codeLines = codesText.read().split('\n')
codeLists = [l[1:-1].replace(', ', ',').split(',') for l in codeLines]
for lis in codeLists:
print_(len(lis))
return codeLists
def getCategoriesFromProducts(filename):
products = readBson(filename)
print_('Product Count:', len(products))
codes = [p['category']['code'].encode("utf-8") for p in products]
uniqueCodes = set(codes)
return list(uniqueCodes)
def summarizeProducts(filename, countPerCategory = 10):
    products = readBson(filename)
    codes = [p['category']['code'].encode("utf-8") for p in products]
    uniqueCodes = list(set(codes))
    counts = {}
    for code in uniqueCodes:
        counts[code] = codes.count(code)
print_('Product Count per Category:', counts)
storedCodes = [k for k, v in counts.iteritems() if v == countPerCategory]
print_('Stored Product Count:', len(storedCodes))
return storedCodes, uniqueCodes, counts
def getremainingCodes(total, storedFile):
storedCodes, uniqueCodes, counts = summarizeProducts(storedFile)
    crowdedCategories, crowdedCodes = getCrowdedCategories(n=total + len(uniqueCodes))
unstoredCodes = [c for c in crowdedCodes if not c in storedCodes]
print_('Unstored Product Count:', len(unstoredCodes))
intersectionCodes = [c for c in crowdedCodes if c in storedCodes]
print_('Intersection Product Count:', intersectionCodes)
finalCodes = unstoredCodes[:total-len(storedCodes)]
print_('Final Product Count:', len(finalCodes))
intersectCodes = [c for c in finalCodes if c in storedCodes]
print_('Intersection Product Count:', len(intersectCodes))
return finalCodes
def getProducts(filename):
return readBson(filename)
def getProductsByCategoryCode(productList):
codes = [p['category']['code'] for p in productList]
uniqueCodes = set(codes)
categoryList = list(uniqueCodes)
productDict = {}
for category in categoryList:
productDict[category] = []
for product in productList:
productDict[product['category']['code']].append(product)
return productDict
def getExpandedProductsByCategoryCode(productList, code):
return [product for product in productList if product['category_code'] == code]
def mergeProducts():
product230 = evalBson('products230.bson')
product780 = evalBson('products780.bson')
product230Dict = getProductsByCategoryCode(product230)
product230Dict.pop('rc',0)
product780Dict = getProductsByCategoryCode(product780)
#productDict = product230Dict + product780Dict
productDict = {}
productDict = product230Dict.copy()
productDict.update(product780Dict)
return productDict
def fixQuotesOnProduct(product):
if '\"' in product['title']:
product['title'] = fixQuotes(product['title'])
if product['subTitle'] != None:
if '\"' in product['subTitle']:
product['subTitle'] = fixQuotes(product['subTitle'])
for spec in product['specs']:
if '\"' in spec['values'][0]:
spec['values'][0] = fixQuotes(spec['values'][0])
return product
def generateGroupedProductsList(readingFileName = 'products.bson', writingFileName = 'groupedProducts.bson'):
unorderedProductList = evalBson(readingFileName, decoding = 'unicode-escape')
categoryProductsMap = getProductsByCategoryCode(unorderedProductList)
orderedProductList = []
categoryCodes = []
for categoryCode in categoryProductsMap.keys():
categoryCodes.append(categoryCode)
categoryCodes.sort()
for categoryCode in categoryCodes:
orderedProductList.extend(categoryProductsMap[categoryCode])
writeToBson(orderedProductList, writingFileName, decoding = 'unicode-escape', printText = True)
print_('WARNING: Encoding Error')
def generateCategoryCodeNameMap():
categories = evalBson('categories.bson')
cd = getCategoriesFromProducts('products.bson')
map = {}
for c in categories['data']['categories']:
if c['categoryCode'] in cd:
map[c['categoryCode']] = c['categoryName']
writeToBson(map, commonFolder + 'categoryCodeNames.json')
def readProducts(products = None, fileName = commonFolder + 'products.json', decoding = 'utf-8'):
return evalBson(fileName) if products == None else products
def readExpandedProducts(products = None, fileName = commonFolder + 'expandedProducts.bson', decoding = 'utf-8'):
return readProducts(products, fileName, decoding)
| [
"[email protected]"
] | |
fa033282eb9b9f69fe0f237ea0dc575bb19602c9 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_vpn_sites_configuration_operations.py | 681ea2f9326309464669ed83ca33a7a38ae17a3a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 8,375 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesConfigurationOperations(object):
"""VpnSitesConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _download_initial(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
request, # type: "_models.GetVpnSitesConfigurationRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._download_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
def begin_download(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
request, # type: "_models.GetVpnSitesConfigurationRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Gives the sas-url to download the configurations for vpn-sites in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
needed.
:type virtual_wan_name: str
:param request: Parameters supplied to download vpn-sites configuration.
:type request: ~azure.mgmt.network.v2019_09_01.models.GetVpnSitesConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
| [
"[email protected]"
] | |
ac36af1e489a5c9b31c7590762b0a39c7afac82e | 7243df7ee2090f76a3d82b898f8a0f2e82198071 | /csv_to_kml.py | e98a31ab4bea4bcfb7c771d945aefad94930d2ad | [] | no_license | TimSC/air-quality-analysis-2018 | 6e3b8dce1a4ab76b36a362229572e20f7014db09 | 64a163a1b406f708a66a6f510647d9bfd49b7e5c | refs/heads/master | 2020-03-18T13:32:16.576221 | 2019-06-13T13:07:49 | 2019-06-13T13:07:49 | 134,790,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,320 | py | #!/usr/bin/env python
'''Example of generating KML from data in a CSV file
References:
'''
from __future__ import print_function
import csv
import urllib2
from datetime import datetime
from lxml import etree
from pykml.factory import KML_ElementMaker as KML
def makeExtendedDataElements(datadict):
'''Converts a dictionary to ExtendedData/Data elements'''
edata = KML.ExtendedData()
for key, value in datadict.iteritems():
edata.append(KML.Data(KML.value(value), name=key + "_"))
return edata
doc = KML.Document()
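# KML colour strings are 8 hex digits in aabbggrr order (alpha, blue, green, red),
# so 'ff0000ff' below is opaque red and 'ff00ff00' is opaque green.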
iconstyles = [
[1,'ff000000'],
[1.2,'ff00ff00'],#10s
[1.4,'ff00ff44'],#20s
[1.6,'ff00cc88'],#30s ffb400
[1.8,'ff00aaaa'],#40s
[2.0,'ff0000ff'],#50s
]
# create a series of Icon Styles
for i, (scale, color) in enumerate(iconstyles):
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("https://maps.google.com/mapfiles/kml/shapes/caution.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="pollution-style-{threshold}".format(threshold=i),
)
)
adverseStyles = [
['negligible',1,'ff888888'],
['slight',1.33,'ff0000aa'],
['moderate',1.66,'ff0000cc'],
['substantial',2.0,'ff0000ff'],
]
for band, scale, color in adverseStyles:
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("http://earth.google.com/images/kml-icons/track-directional/track-0.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="adverse-style-{threshold}".format(threshold=band),
)
)
beneficialStyles = [
['negligible',1,'ff888888'],
['slight',1.33,'ff00aa00'],
['moderate',1.66,'ff00cc00'],
['substantial',2.0,'ff00ff00'],
]
for band, scale, color in beneficialStyles:
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("http://earth.google.com/images/kml-icons/track-directional/track-8.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="beneficial-style-{threshold}".format(threshold=band),
)
)
doc.append(KML.Folder())
receptorPosDict = {}
for row in csv.reader(open("receptors.csv")):
receptorPosDict[int(row[0])] = float(row[1]), float(row[2])
# read in a csv file, and create a placemark for each record
for rowNum, row in enumerate(csv.reader(open("2026-Nitrogen Dioxide Results.csv"))):
if rowNum < 5: continue
receptor, baseline2015, without2026, with2026, pcAqal, change, pcRelChange, significant, direction = row
baseline2015 = float(baseline2015)
receptorNum = int(receptor[1:])
if receptorNum not in receptorPosDict:
print ("No position for receptor", receptorNum)
continue
pos = receptorPosDict[receptorNum]
#print (receptor, pos, baseline2015, significant, direction)
labelData = {}
labelData['Receptor'] = receptorNum
labelData['NO2 Baseline (2015)'] = baseline2015
labelData['NO2 Without scheme (2026)'] = without2026
labelData['NO2 With scheme (2026)'] = with2026
labelData['Impact'] = "{} {}".format(significant, direction)
if 0:
pm = KML.Placemark(
#KML.name("NO2={0}".format(baseline2015)),
KML.styleUrl(
"#pollution-style-{thresh}".format(
thresh=int(baseline2015/10.0)
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
if 1:
if direction=="Adverse":
pm = KML.Placemark(
KML.styleUrl(
"#adverse-style-{thresh}".format(
thresh=significant.lower()
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
if direction=="Beneficial":
pm = KML.Placemark(
KML.styleUrl(
"#beneficial-style-{thresh}".format(
thresh=significant.lower()
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
# check if the schema is valid
from pykml.parser import Schema
schema_gx = Schema("kml22gx.xsd")
schema_gx.assertValid(doc)
fi = open("out.kml", "wt")
fi.write(etree.tostring(doc, pretty_print=True))
fi.close()
| [
"[email protected]"
] | |
14949c91a4d0ab5a8e7d226a1ccfb3b8e203319e | b74d9c0655593d488f1bbf3e6d97a6d587fae9e8 | /printing/wsgi.py | d8b6927a26ae5b4d5ae3ab65cd51e6dc08659019 | [] | no_license | dbca-wa/printing | d3353dce75412cfb1a1cf4a1f3f88373b4d36194 | e0c5359fecf84a5512c4b9ede71f56acd9058bf9 | refs/heads/master | 2022-12-12T23:02:43.993583 | 2016-04-28T07:48:28 | 2016-04-28T07:48:28 | 57,281,158 | 0 | 0 | null | 2020-09-09T09:07:44 | 2016-04-28T07:41:33 | JavaScript | UTF-8 | Python | false | false | 393 | py | """
WSGI config for printing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "printing.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
4a8e75faab481c89f697e468241dc944b98ee689 | e5654e71ad4f043bb28105c3b6f3cd833e1c52dc | /openai/venv/lib/python3.10/site-packages/sqlalchemy/dialects/mysql/base.py | 9948602d3dbf1e8ba0c16adcb1a9cec0447c32a8 | [] | no_license | henrymendez/garage | 0b795f020a68fe2d349b556fb8567f6b96488ed5 | b7aaa920a52613e3f1f04fa5cd7568ad37302d11 | refs/heads/master | 2023-07-19T20:16:02.792007 | 2023-07-07T16:58:15 | 2023-07-07T16:58:15 | 67,760,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115,423 | py | # mysql/base.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql
:name: MySQL / MariaDB
:full_support: 5.6, 5.7, 8.0 / 10.4, 10.5
:normal_support: 5.6+ / 10+
:best_effort: 5.0.2+ / 5.0.2+
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 5.0.2 through modern releases,
as well as all modern versions of MariaDB. See the official MySQL
documentation for detailed information about features supported in any given
server release.
.. versionchanged:: 1.4 minimum MySQL version supported is now 5.0.2.
MariaDB Support
~~~~~~~~~~~~~~~
The MariaDB variant of MySQL retains fundamental compatibility with MySQL's
protocols however the development of these two products continues to diverge.
Within the realm of SQLAlchemy, the two databases have a small number of
syntactical and behavioral differences that SQLAlchemy accommodates automatically.
To connect to a MariaDB database, no changes to the database URL are required::
engine = create_engine("mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
Upon first connect, the SQLAlchemy dialect employs a
server version detection scheme that determines if the
backing database reports as MariaDB. Based on this flag, the dialect
can make different choices in those areas where its behavior
must be different.
.. _mysql_mariadb_only_mode:
MariaDB-Only Mode
~~~~~~~~~~~~~~~~~
The dialect also supports an **optional** "MariaDB-only" mode of connection, which may be
useful for the case where an application makes use of MariaDB-specific features
and is not compatible with a MySQL database. To use this mode of operation,
replace the "mysql" token in the above URL with "mariadb"::
engine = create_engine("mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
The above engine, upon first connect, will raise an error if the server version
detection detects that the backing database is not MariaDB.
When using an engine with ``"mariadb"`` as the dialect name, **all mysql-specific options
that include the name "mysql" in them are now named with "mariadb"**. This means
options like ``mysql_engine`` should be named ``mariadb_engine``, etc. Both
"mysql" and "mariadb" options can be used simultaneously for applications that
use URLs with both "mysql" and "mariadb" dialects::
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mariadb_engine="InnoDB",
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
my_table.c.textdata,
mysql_prefix="FULLTEXT",
mariadb_prefix="FULLTEXT",
)
Similar behavior will occur when the above structures are reflected, i.e. the
"mariadb" prefix will be present in the option names when the database URL
is based on the "mariadb" name.
.. versionadded:: 1.4 Added "mariadb" dialect name supporting "MariaDB-only mode"
for the MySQL dialect.
.. _mysql_connection_timeouts:
Connection Timeouts and Disconnects
-----------------------------------
MySQL / MariaDB feature an automatic connection close behavior, for connections that
have been idle for a fixed period of time, defaulting to eight hours.
To circumvent this issue, use
the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
a connection will be discarded and replaced with a new one if it has been
present in the pool for a fixed number of seconds::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
For more comprehensive disconnect detection of pooled connections, including
accommodation of server restarts and network issues, a pre-ping approach may
be employed. See :ref:`pool_disconnects` for current approaches.
.. seealso::
:ref:`pool_disconnects` - Background on several techniques for dealing
with timed out connections as well as database restarts.
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
Both MySQL's and MariaDB's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8mb4',
mysql_key_block_size="1024"
)
When supporting :ref:`mysql_mariadb_only_mode` mode, similar keys against
the "mariadb" prefix must be included as well. The values can of course
vary independently so that different settings on MySQL vs. MariaDB may
be maintained::
# support both "mysql" and "mariadb-only" engine URLs
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mariadb_engine='InnoDB',
mysql_charset='utf8mb4',
mariadb_charset='utf8',
          mysql_key_block_size="1024",
mariadb_key_block_size="1024"
)
The MySQL / MariaDB dialects will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
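For example, a hypothetical table using two of the space-rendered options
might be declared as follows (an illustrative sketch only; the directory
path and character set values are placeholders)::
    Table('mytable', metadata,
          Column('data', String(32)),
          mysql_engine='InnoDB',
          mysql_data_directory='/data/mysql',
          mysql_character_set='utf8mb4'
    )
The above emits ``DATA DIRECTORY`` and ``CHARACTER SET`` clauses, rather than
``DATA_DIRECTORY`` and ``CHARACTER_SET``, in the generated ``CREATE TABLE``
statement.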
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`_schema.Table`
that is created in a MySQL / MariaDB database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
Case Sensitivity and Table Reflection
-------------------------------------
Both MySQL and MariaDB have inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL / MariaDB
database itself, especially if database reflection features are
to be used.
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
All MySQL / MariaDB dialects support setting of transaction isolation level both via a
dialect-specific parameter :paramref:`_sa.create_engine.isolation_level`
accepted
by :func:`_sa.create_engine`, as well as the
:paramref:`.Connection.execution_options.isolation_level` argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET SESSION TRANSACTION ISOLATION LEVEL <level>`` for each new
connection. For the special AUTOCOMMIT isolation level, DBAPI-specific
techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
attributes provided by specific DBAPIs, and is currently supported by
MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it,
the database connection will return true for the value of
``SELECT @@autocommit;``.
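The ``AUTOCOMMIT`` level is requested in the same way as the other levels;
a brief sketch (the statement being executed is a placeholder)::
    with engine.connect() as connection:
        connection = connection.execution_options(
            isolation_level="AUTOCOMMIT"
        )
        connection.execute(some_statement)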
There are also more options for isolation level configurations, such as
"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply
different isolation level settings. See the discussion at
:ref:`dbapi_autocommit` for background.
.. seealso::
:ref:`dbapi_autocommit`
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`_schema.Column.autoincrement` argument of :class:`_schema.Column`.
This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
.. _mysql_ss_cursors:
Server Side Cursors
-------------------
Server-side cursor support is available for the mysqlclient, PyMySQL,
mariadbconnector dialects and may also be available in others. This makes use
of either the "buffered=True/False" flag if available or by using a class such
as ``MySQLdb.cursors.SSCursor`` or ``pymysql.cursors.SSCursor`` internally.
Server side cursors are enabled on a per-statement basis by using the
:paramref:`.Connection.execution_options.stream_results` connection execution
option::
with engine.connect() as conn:
result = conn.execution_options(stream_results=True).execute(text("select * from table"))
Note that some kinds of SQL statements may not be supported with
server side cursors; generally, only SQL statements that return rows should be
used with this option.
.. deprecated:: 1.4 The dialect-level server_side_cursors flag is deprecated
and will be removed in a future release. Please use the
   :paramref:`_engine.Connection.execution_options.stream_results` execution option for
unbuffered cursor support.
.. seealso::
:ref:`engine_stream_results`
.. _mysql_unicode:
Unicode
-------
Charset Selection
~~~~~~~~~~~~~~~~~
Most MySQL / MariaDB DBAPIs offer the option to set the client character set for
a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
will make use of the ``default-character-set`` setting in the ``my.cnf``
file as well. Documentation for the DBAPI in use should be consulted
for specific behavior.
The encoding used for Unicode has traditionally been ``'utf8'``. However, for
MySQL versions 5.5.3 and MariaDB 5.5 on forward, a new MySQL-specific encoding
``'utf8mb4'`` has been introduced, and as of MySQL 8.0 a warning is emitted by
the server if plain ``utf8`` is specified within any server-side directives,
replaced with ``utf8mb3``. The rationale for this new encoding is that
MySQL's legacy utf-8 encoding only supports codepoints up to three
bytes instead of four. Therefore, when communicating with a MySQL or MariaDB
database that includes codepoints more than three bytes in size, this new
charset is preferred, if supported by both the database as well as the client
DBAPI, as in::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
All modern DBAPIs should support the ``utf8mb4`` charset.
In order to use ``utf8mb4`` encoding for a schema that was created with legacy
``utf8``, changes to the MySQL/MariaDB schema and/or server configuration may be
required.
.. seealso::
`The utf8mb4 Character Set \
<https://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
in the MySQL documentation
.. _mysql_binary_introducer:
Dealing with Binary Data Warnings and Unicode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
emit a warning when attempting to pass binary data to the database, while a
character set encoding is also in place, when the binary data itself is not
valid for that encoding::
default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
'F9876A'")
cursor.execute(statement, parameters)
This warning is due to the fact that the MySQL client library is attempting to
interpret the binary string as a unicode object even if a datatype such
as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires
a binary "character set introducer" be present before any non-NULL value
that renders like this::
INSERT INTO table (data) VALUES (_binary %s)
These character set introducers are provided by the DBAPI driver, assuming the
use of mysqlclient or PyMySQL (both of which are recommended). Add the query
string parameter ``binary_prefix=true`` to the URL to repair this warning::
# mysqlclient
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
# PyMySQL
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
SQLAlchemy itself cannot render this ``_binary`` prefix reliably, as it does
not work with the NULL value, which is valid to be sent as a bound parameter.
As the MySQL driver renders parameters directly into the SQL string, it's the
most efficient place for this additional keyword to be passed.
.. seealso::
`Character set introducers <https://dev.mysql.com/doc/refman/5.7/en/charset-introducer.html>`_ - on the MySQL website
ANSI Quoting Style
------------------
MySQL / MariaDB feature two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of :ref:`sql_mode<mysql_sql_mode>` when a connection is first
established with a particular :class:`_engine.Engine`.
This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
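As a brief illustration of the default backtick style (a sketch only; the
``order`` column name is chosen simply because it requires quoting), the
quoting applied by the dialect before any connection is made can be seen by
compiling a statement directly::
    from sqlalchemy import column, select
    from sqlalchemy.dialects import mysql
    # renders: SELECT `order`
    print(select(column("order")).compile(dialect=mysql.dialect()))
Once a connection reports a ``sql_mode`` that includes ``ANSI_QUOTES``, the
same identifier is rendered using double quotes instead.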
.. _mysql_sql_mode:
Changing the sql_mode
---------------------
MySQL supports operating in multiple
`Server SQL Modes <https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html>`_ for
both Servers and Clients. To change the ``sql_mode`` for a given application, a
developer can leverage SQLAlchemy's Events system.
In the following example, the event system is used to set the ``sql_mode`` on
the ``first_connect`` and ``connect`` events::
from sqlalchemy import create_engine, event
eng = create_engine("mysql://scott:tiger@localhost/test", echo='debug')
# `insert=True` will ensure this is the very first listener to run
@event.listens_for(eng, "connect", insert=True)
def connect(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("SET sql_mode = 'STRICT_ALL_TABLES'")
conn = eng.connect()
In the example illustrated above, the "connect" event will invoke the "SET"
statement on the connection at the moment a particular DBAPI connection is
first created for a given Pool, before the connection is made available to the
connection pool. Additionally, because the function was registered with
``insert=True``, it will be prepended to the internal list of registered
functions.
MySQL / MariaDB SQL Extensions
------------------------------
Many of the MySQL / MariaDB SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid SQL statement can be executed as a string as well.
Some limited direct support for MySQL / MariaDB extensions to SQL is currently
available.
* INSERT..ON DUPLICATE KEY UPDATE: See
:ref:`mysql_insert_on_duplicate_key_update`
* SELECT pragma, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10, mariadb_limit=10)
* optimizer hints, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with("/*+ NO_RANGE_OPTIMIZATION(t4 PRIMARY) */")
* index hints, use :meth:`_expression.Select.with_hint` and
:meth:`_query.Query.with_hint`::
select(...).with_hint(some_table, "USE INDEX xyz")
* MATCH operator support::
from sqlalchemy.dialects.mysql import match
select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
.. seealso::
:class:`_mysql.match`
.. _mysql_insert_on_duplicate_key_update:
INSERT...ON DUPLICATE KEY UPDATE (Upsert)
------------------------------------------
MySQL / MariaDB allow "upserts" (update or insert)
of rows into a table via the ``ON DUPLICATE KEY UPDATE`` clause of the
``INSERT`` statement. A candidate row will only be inserted if that row does
not match an existing primary or unique key in the table; otherwise, an UPDATE
will be performed. The statement allows for separate specification of the
values to INSERT versus the values for UPDATE.
SQLAlchemy provides ``ON DUPLICATE KEY UPDATE`` support via the MySQL-specific
:func:`.mysql.insert()` function, which provides
the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.mysql import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data=insert_stmt.inserted.data,
... status='U'
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = VALUES(data), status = %s
Unlike PostgreSQL's "ON CONFLICT" phrase, the "ON DUPLICATE KEY UPDATE"
phrase will always match on any primary key or unique key, and will always
perform an UPDATE if there's a match; there are no options for it to raise
an error or to skip performing an UPDATE.
``ON DUPLICATE KEY UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are normally specified using
keyword arguments passed to the
:meth:`_mysql.Insert.on_duplicate_key_update`
given column key values (usually the name of the column, unless it
specifies :paramref:`_schema.Column.key`
) as keys and literal or SQL expressions
as values:
.. sourcecode:: pycon+sql
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data="some data",
... updated_at=func.current_timestamp(),
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP
In a manner similar to that of :meth:`.UpdateBase.values`, other parameter
forms are accepted, including a single dictionary:
.. sourcecode:: pycon+sql
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... {"data": "some data", "updated_at": func.current_timestamp()},
... )
as well as a list of 2-tuples, which will automatically provide
a parameter-ordered UPDATE statement in a manner similar to that described
at :ref:`tutorial_parameter_ordered_updates`. Unlike the :class:`_expression.Update`
object,
no special flag is needed to specify the intent since the argument form in
this context is unambiguous:
.. sourcecode:: pycon+sql
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... [
... ("data", "some data"),
... ("updated_at", func.current_timestamp()),
... ]
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP
.. versionchanged:: 1.3 support for parameter-ordered UPDATE clause within
MySQL ON DUPLICATE KEY UPDATE
.. warning::
The :meth:`_mysql.Insert.on_duplicate_key_update`
method does **not** take into
    account Python-side default UPDATE values or generation functions, e.g.
    those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY style of UPDATE,
unless they are manually specified explicitly in the parameters.
In order to refer to the proposed insertion row, the special alias
:attr:`_mysql.Insert.inserted` is available as an attribute on
the :class:`_mysql.Insert` object; this object is a
:class:`_expression.ColumnCollection` which contains all columns of the target
table:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh')
>>> do_update_stmt = stmt.on_duplicate_key_update(
... data="updated value",
... author=stmt.inserted.author
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (%s, %s, %s)
ON DUPLICATE KEY UPDATE data = %s, author = VALUES(author)
When rendered, the "inserted" namespace will produce the expression
``VALUES(<columnname>)``.
.. versionadded:: 1.2 Added support for MySQL ON DUPLICATE KEY UPDATE clause
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
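A brief sketch (the table and criteria are placeholders) illustrating that
the value reports rows matched by the criteria, even if the UPDATE did not
modify their contents::
    with engine.begin() as conn:
        result = conn.execute(
            my_table.update()
            .where(my_table.c.status == "old")
            .values(status="old")
        )
        result.rowcount   # rows matched by the WHERE criteria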
.. seealso::
:attr:`_engine.CursorResult.rowcount`
.. _mysql_indexes:
MySQL / MariaDB- Specific Index Options
-----------------------------------------
MySQL and MariaDB-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL and MariaDB both provide an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` and/or ``mariadb_length`` parameters::
Index('my_index', my_table.c.data, mysql_length=10, mariadb_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Index('a_b_idx', my_table.c.a, my_table.c.b, mariadb_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL and MariaDB only allow a
length for a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY,
VARBINARY and BLOB.
Index Prefixes
~~~~~~~~~~~~~~
MySQL storage engines permit you to specify an index prefix when creating
an index. SQLAlchemy provides this feature via the
``mysql_prefix`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
storage engine.
.. versionadded:: 1.1.5
.. seealso::
`CREATE INDEX <https://dev.mysql.com/doc/refman/5.0/en/create-index.html>`_ - MySQL documentation
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash', mariadb_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash', mariadb_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
https://dev.mysql.com/doc/refman/5.0/en/create-index.html
https://dev.mysql.com/doc/refman/5.0/en/create-table.html
Index Parsers
~~~~~~~~~~~~~
CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This
is available using the keyword argument ``mysql_with_parser``::
Index(
'my_index', my_table.c.data,
mysql_prefix='FULLTEXT', mysql_with_parser="ngram",
mariadb_prefix='FULLTEXT', mariadb_with_parser="ngram",
)
.. versionadded:: 1.3
.. _mysql_foreign_keys:
MySQL / MariaDB Foreign Keys
-----------------------------
MySQL and MariaDB's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Neither MySQL nor MariaDB support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`
will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL or MariaDB. In order to use these keywords on a foreign key while having
them ignored on a MySQL / MariaDB backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql", "mariadb")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL or MariaDB backends. This argument is
silently ignored by MySQL / MariaDB, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL / MariaDB backends; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a
ForeignKeyConstraint at DDL definition time.
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL / MariaDB storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload_with=engine
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_unique_constraints:
MySQL / MariaDB Unique Constraints and Reflection
----------------------------------------------------
SQLAlchemy supports both the :class:`.Index` construct with the
flag ``unique=True``, indicating a UNIQUE index, as well as the
:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
Both objects/syntaxes are supported by MySQL / MariaDB when emitting DDL to create
these constraints. However, MySQL / MariaDB does not have a unique constraint
construct that is separate from a unique index; that is, the "UNIQUE"
constraint on MySQL / MariaDB is equivalent to creating a "UNIQUE INDEX".
When reflecting these constructs, the
:meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
methods will **both**
return an entry for a UNIQUE index in MySQL / MariaDB. However, when performing
full table reflection using ``Table(..., autoload_with=engine)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`_schema.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`_schema.Table.indexes`
collection.
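A short sketch of the reflection behavior described above (the engine and
table name are placeholders)::
    from sqlalchemy import inspect
    insp = inspect(engine)
    insp.get_indexes("mytable")              # UNIQUE index present, unique=True
    insp.get_unique_constraints("mytable")   # the same constraint reported here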
TIMESTAMP / DATETIME issues
---------------------------
.. _mysql_timestamp_onupdate:
Rendering ON UPDATE CURRENT TIMESTAMP for MySQL / MariaDB's explicit_defaults_for_timestamp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL / MariaDB have historically expanded the DDL for the :class:`_types.TIMESTAMP`
datatype into the phrase "TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE
CURRENT_TIMESTAMP", which includes non-standard SQL that automatically updates
the column with the current timestamp when an UPDATE occurs, eliminating the
usual need to use a trigger in such a case where server-side update changes are
desired.
MySQL 5.6 introduced a new flag `explicit_defaults_for_timestamp
<https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ which disables the above behavior,
and in MySQL 8 this flag defaults to true, meaning in order to get a MySQL
"on update timestamp" without changing this flag, the above DDL must be
rendered explicitly. Additionally, the same DDL is valid for use of the
``DATETIME`` datatype as well.
SQLAlchemy's MySQL dialect does not yet have an option to generate
MySQL's "ON UPDATE CURRENT_TIMESTAMP" clause, noting that this is not a general
purpose "ON UPDATE" as there is no such syntax in standard SQL. SQLAlchemy's
:paramref:`_schema.Column.server_onupdate` parameter is currently not related
to this special MySQL behavior.
To generate this DDL, make use of the :paramref:`_schema.Column.server_default`
parameter and pass a textual clause that also includes the ON UPDATE clause::
from sqlalchemy import Table, MetaData, Column, Integer, String, TIMESTAMP
from sqlalchemy import text
metadata = MetaData()
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column(
'last_updated',
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
)
The same instructions apply to use of the :class:`_types.DateTime` and
:class:`_types.DATETIME` datatypes::
from sqlalchemy import DateTime
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column(
'last_updated',
DateTime,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
)
Even though the :paramref:`_schema.Column.server_onupdate` feature does not
generate this DDL, it still may be desirable to signal to the ORM that this
updated value should be fetched. This syntax looks like the following::
from sqlalchemy.schema import FetchedValue
class MyClass(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
data = Column(String(50))
last_updated = Column(
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
server_onupdate=FetchedValue()
)
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
This behavior of MySQL can be changed on the MySQL side using the
`explicit_defaults_for_timestamp
<https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
any other datatype on the MySQL side with regards to defaults and nullability.
However, to accommodate the vast majority of MySQL databases that do not
specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
any TIMESTAMP column that does not specify ``nullable=False``. In order to
accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
``nullable=False``. The following example illustrates::
from sqlalchemy import MetaData, Integer, Table, Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP NOT NULL
)
.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
cases for TIMESTAMP columns, to accommodate
``explicit_defaults_for_timestamp``. Prior to this version, it will
not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
""" # noqa
from array import array as _array
from collections import defaultdict
from itertools import compress
import re
from sqlalchemy import literal_column
from sqlalchemy import text
from sqlalchemy.sql import visitors
from . import reflection as _reflection
from .enumerated import ENUM
from .enumerated import SET
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from .reserved_words import RESERVED_WORDS_MARIADB
from .reserved_words import RESERVED_WORDS_MYSQL
from .types import _FloatType
from .types import _IntegerType
from .types import _MatchType
from .types import _NumericType
from .types import _StringType
from .types import BIGINT
from .types import BIT
from .types import CHAR
from .types import DATETIME
from .types import DECIMAL
from .types import DOUBLE
from .types import FLOAT
from .types import INTEGER
from .types import LONGBLOB
from .types import LONGTEXT
from .types import MEDIUMBLOB
from .types import MEDIUMINT
from .types import MEDIUMTEXT
from .types import NCHAR
from .types import NUMERIC
from .types import NVARCHAR
from .types import REAL
from .types import SMALLINT
from .types import TEXT
from .types import TIME
from .types import TIMESTAMP
from .types import TINYBLOB
from .types import TINYINT
from .types import TINYTEXT
from .types import VARCHAR
from .types import YEAR
from ... import exc
from ... import log
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import coercions
from ...sql import compiler
from ...sql import elements
from ...sql import functions
from ...sql import operators
from ...sql import roles
from ...sql import util as sql_util
from ...sql.sqltypes import Unicode
from ...types import BINARY
from ...types import BLOB
from ...types import BOOLEAN
from ...types import DATE
from ...types import VARBINARY
from ...util import topological
AUTOCOMMIT_RE = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)",
re.I | re.UNICODE,
)
SET_RE = re.compile(
r"\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w", re.I | re.UNICODE
)
# old names
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
colspecs = {
_IntegerType: _IntegerType,
_NumericType: _NumericType,
_FloatType: _FloatType,
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
sqltypes.MatchType: _MatchType,
sqltypes.JSON: JSON,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
"bigint": BIGINT,
"binary": BINARY,
"bit": BIT,
"blob": BLOB,
"boolean": BOOLEAN,
"char": CHAR,
"date": DATE,
"datetime": DATETIME,
"decimal": DECIMAL,
"double": DOUBLE,
"enum": ENUM,
"fixed": DECIMAL,
"float": FLOAT,
"int": INTEGER,
"integer": INTEGER,
"json": JSON,
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"nchar": NCHAR,
"nvarchar": NVARCHAR,
"numeric": NUMERIC,
"set": SET,
"smallint": SMALLINT,
"text": TEXT,
"time": TIME,
"timestamp": TIMESTAMP,
"tinyblob": TINYBLOB,
"tinyint": TINYINT,
"tinytext": TINYTEXT,
"varbinary": VARBINARY,
"varchar": VARCHAR,
"year": YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
def create_server_side_cursor(self):
if self.dialect.supports_server_side_cursors:
return self._dbapi_connection.cursor(self.dialect._sscursor)
else:
raise NotImplementedError()
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"select nextval(%s)"
% self.identifier_preparer.format_sequence(seq)
),
type_,
)
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update({"milliseconds": "millisecond"})
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
"""
if self.stack:
stmt = self.stack[-1]["selectable"]
if stmt._where_criteria:
return " FROM DUAL"
return ""
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_sequence(self, seq, **kw):
return "nextval(%s)" % self.preparer.format_sequence(seq)
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def _render_json_extract_from_binary(self, binary, operator, **kw):
# note we are intentionally calling upon the process() calls in the
# order in which they appear in the SQL String as this is used
# by positional parameter rendering
if binary.type._type_affinity is sqltypes.JSON:
return "JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
# for non-JSON, MySQL doesn't handle JSON null at all so it has to
# be explicit
case_expression = "CASE JSON_EXTRACT(%s, %s) WHEN 'null' THEN NULL" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
if binary.type._type_affinity is sqltypes.Integer:
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS SIGNED INTEGER)"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Numeric:
if (
binary.type.scale is not None
and binary.type.precision is not None
):
# using DECIMAL here because MySQL does not recognize NUMERIC
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS DECIMAL(%s, %s))"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
binary.type.precision,
binary.type.scale,
)
)
else:
# FLOAT / REAL not added in MySQL til 8.0.17
type_expression = (
"ELSE JSON_EXTRACT(%s, %s)+0.0000000000000000000000"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
# explicitly return true/false constants
type_expression = "WHEN true THEN true ELSE false"
elif binary.type._type_affinity is sqltypes.String:
# (gord): this fails with a JSON value that's a four byte unicode
# string. SQLite has the same problem at the moment
# (zzzeek): I'm not really sure. let's take a look at a test case
# that hits each backend and maybe make a requires rule for it?
type_expression = "ELSE JSON_UNQUOTE(JSON_EXTRACT(%s, %s))" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# other affinity....this is not expected right now
type_expression = "ELSE JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
return case_expression + " " + type_expression + " END"
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_on_duplicate_key_update(self, on_duplicate, **kw):
statement = self.current_executable
if on_duplicate._parameter_ordering:
parameter_ordering = [
coercions.expect(roles.DMLColumnRole, key)
for key in on_duplicate._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
statement.table.c[key]
for key in parameter_ordering
if key in statement.table.c
] + [c for c in statement.table.c if c.key not in ordered_keys]
else:
cols = statement.table.c
clauses = []
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate.update):
val = on_duplicate.update[column.key]
if coercions._is_literal(val):
val = elements.BindParameter(None, val, type_=column.type)
value_text = self.process(val.self_group(), use_schema=False)
else:
def replace(obj):
if (
isinstance(obj, elements.BindParameter)
and obj.type._isnull
):
obj = obj._clone()
obj.type = column.type
return obj
elif (
isinstance(obj, elements.ColumnClause)
and obj.table is on_duplicate.inserted_alias
):
obj = literal_column(
"VALUES(" + self.preparer.quote(obj.name) + ")"
)
return obj
else:
# element is not replaced
return None
val = visitors.replacement_traverse(val, {}, replace)
value_text = self.process(val.self_group(), use_schema=False)
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
non_matching = set(on_duplicate.update) - set(c.key for c in cols)
if non_matching:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in non_matching)),
)
)
return "ON DUPLICATE KEY UPDATE " + ", ".join(clauses)
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
_match_valid_flag_combinations = frozenset(
(
# (boolean_mode, natural_language, query_expansion)
(False, False, False),
(True, False, False),
(False, True, False),
(False, False, True),
(False, True, True),
)
)
_match_flag_expressions = (
"IN BOOLEAN MODE",
"IN NATURAL LANGUAGE MODE",
"WITH QUERY EXPANSION",
)
def visit_mysql_match(self, element, **kw):
return self.visit_match_op_binary(element, element.operator, **kw)
def visit_match_op_binary(self, binary, operator, **kw):
"""
Note that `mysql_boolean_mode` is enabled by default because of
backward compatibility
"""
modifiers = binary.modifiers
boolean_mode = modifiers.get("mysql_boolean_mode", True)
natural_language = modifiers.get("mysql_natural_language", False)
query_expansion = modifiers.get("mysql_query_expansion", False)
flag_combination = (boolean_mode, natural_language, query_expansion)
if flag_combination not in self._match_valid_flag_combinations:
flags = (
"in_boolean_mode=%s" % boolean_mode,
"in_natural_language_mode=%s" % natural_language,
"with_query_expansion=%s" % query_expansion,
)
flags = ", ".join(flags)
raise exc.CompileError("Invalid MySQL match flags: %s" % flags)
match_clause = binary.left
match_clause = self.process(match_clause, **kw)
against_clause = self.process(binary.right, **kw)
if any(flag_combination):
flag_expressions = compress(
self._match_flag_expressions,
flag_combination,
)
against_clause = [against_clause]
against_clause.extend(flag_expressions)
against_clause = " ".join(against_clause)
return "MATCH (%s) AGAINST (%s)" % (match_clause, against_clause)
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause, type_=None, **kw):
if type_ is None:
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.TypeDecorator):
return self.visit_typeclause(typeclause, type_.impl, **kw)
elif isinstance(type_, sqltypes.Integer):
if getattr(type_, "unsigned", False):
return "UNSIGNED INTEGER"
else:
return "SIGNED INTEGER"
elif isinstance(type_, sqltypes.TIMESTAMP):
return "DATETIME"
elif isinstance(
type_,
(
sqltypes.DECIMAL,
sqltypes.DateTime,
sqltypes.Date,
sqltypes.Time,
),
):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.String) and not isinstance(
type_, (ENUM, SET)
):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return "BINARY"
elif isinstance(type_, sqltypes.JSON):
return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace(
"NUMERIC", "DECIMAL"
)
elif (
isinstance(type_, sqltypes.Float)
and self.dialect._support_float_cast
):
return self.dialect.type_compiler.process(type_)
else:
return None
def visit_cast(self, cast, **kw):
type_ = self.process(cast.typeclause)
if type_ is None:
util.warn(
"Datatype %s does not support CAST on MySQL/MariaDb; "
"the CAST will be skipped."
% self.dialect.type_compiler.process(cast.typeclause.type)
)
return self.process(cast.clause.self_group(), **kw)
return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
# override native_boolean=False behavior here, as
# MySQL still supports native boolean
def visit_true(self, element, **kw):
return "true"
def visit_false(self, element, **kw):
return "false"
def get_select_precolumns(self, select, **kw):
"""Add special MySQL keywords in place of DISTINCT.
        .. deprecated:: 1.4 this usage is deprecated.
:meth:`_expression.Select.prefix_with` should be used for special
keywords at the start of a SELECT.
"""
if isinstance(select._distinct, util.string_types):
util.warn_deprecated(
"Sending string values for 'distinct' is deprecated in the "
"MySQL dialect and will be removed in a future release. "
"Please use :meth:`.Select.prefix_with` for special keywords "
"at the start of a SELECT statement",
version="1.4",
)
return select._distinct.upper() + " "
return super(MySQLCompiler, self).get_select_precolumns(select, **kw)
def visit_join(self, join, asfrom=False, from_linter=None, **kwargs):
if from_linter:
from_linter.edges.add((join.left, join.right))
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " INNER JOIN "
return "".join(
(
self.process(
join.left, asfrom=True, from_linter=from_linter, **kwargs
),
join_type,
self.process(
join.right, asfrom=True, from_linter=from_linter, **kwargs
),
" ON ",
self.process(join.onclause, from_linter=from_linter, **kwargs),
)
)
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
tmp = " LOCK IN SHARE MODE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of and self.dialect.supports_for_update_of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def limit_clause(self, select, **kw):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit_clause, offset_clause = (
select._limit_clause,
select._offset_clause,
)
if limit_clause is None and offset_clause is None:
return ""
elif offset_clause is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# https://dev.mysql.com/doc/refman/5.0/en/select.html
if limit_clause is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
"18446744073709551615",
)
else:
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
self.process(limit_clause, **kw),
)
else:
# No offset provided, so just use the limit
return " \n LIMIT %s" % (self.process(limit_clause, **kw),)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
else:
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
kw["asfrom"] = True
return ", ".join(
t._compiler_dispatch(self, **kw)
for t in [from_table] + list(extra_froms)
)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return None
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to MySQL."""
kw["asfrom"] = True
return "USING " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, element_types):
return (
"SELECT %(outer)s FROM (SELECT %(inner)s) "
"as _empty_set WHERE 1!=1"
% {
"inner": ", ".join(
"1 AS _in_%s" % idx
for idx, type_ in enumerate(element_types)
),
"outer": ", ".join(
"_in_%s" % idx for idx, type_ in enumerate(element_types)
),
}
)
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "NOT (%s <=> %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "%s <=> %s" % (
self.process(binary.left),
self.process(binary.right),
)
def _mariadb_regexp_flags(self, flags, pattern, **kw):
return "CONCAT('(?', %s, ')', %s)" % (
self.process(flags, **kw),
self.process(pattern, **kw),
)
def _regexp_match(self, op_string, binary, operator, **kw):
flags = binary.modifiers["flags"]
if flags is None:
return self._generate_generic_binary(binary, op_string, **kw)
elif self.dialect.is_mariadb:
return "%s%s%s" % (
self.process(binary.left, **kw),
op_string,
self._mariadb_regexp_flags(flags, binary.right),
)
else:
text = "REGEXP_LIKE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(flags, **kw),
)
if op_string == " NOT REGEXP ":
return "NOT %s" % text
else:
return text
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match(" REGEXP ", binary, operator, **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match(" NOT REGEXP ", binary, operator, **kw)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
flags = binary.modifiers["flags"]
replacement = binary.modifiers["replacement"]
if flags is None:
return "REGEXP_REPLACE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(replacement, **kw),
)
elif self.dialect.is_mariadb:
return "REGEXP_REPLACE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self._mariadb_regexp_flags(flags, binary.right),
self.process(replacement, **kw),
)
else:
return "REGEXP_REPLACE(%s, %s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(replacement, **kw),
self.process(flags, **kw),
)
class MySQLDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [
self.preparer.format_column(column),
self.dialect.type_compiler.process(
column.type, type_expression=column
),
]
if column.computed is not None:
colspec.append(self.process(column.computed))
is_timestamp = isinstance(
column.type._unwrapped_dialect_impl(self.dialect),
sqltypes.TIMESTAMP,
)
if not column.nullable:
colspec.append("NOT NULL")
# see: https://docs.sqlalchemy.org/en/latest/dialects/mysql.html#mysql_timestamp_null # noqa
elif column.nullable and is_timestamp:
colspec.append("NULL")
comment = column.comment
if comment is not None:
literal = self.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
colspec.append("COMMENT " + literal)
if (
column.table is not None
and column is column.table._autoincrement_column
and (
column.server_default is None
or isinstance(column.server_default, sa_schema.Identity)
)
and not (
self.dialect.supports_sequences
and isinstance(column.default, sa_schema.Sequence)
and not column.default.optional
)
):
colspec.append("AUTO_INCREMENT")
else:
default = self.get_column_default_string(column)
if default is not None:
colspec.append("DEFAULT " + default)
return " ".join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
("CHARSET", "COLLATE"),
("CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
def visit_create_index(self, create, **kw):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [
self.sql_compiler.process(
elements.Grouping(expr)
if (
isinstance(expr, elements.BinaryExpression)
or (
isinstance(expr, elements.UnaryExpression)
and expr.modifier
not in (operators.desc_op, operators.asc_op)
)
or isinstance(expr, functions.FunctionElement)
)
else expr,
include_table=False,
literal_binds=True,
)
for expr in index.expressions
]
name = self._prepared_index_name(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
index_prefix = index.kwargs.get("%s_prefix" % self.dialect.name, None)
if index_prefix:
text += index_prefix + " "
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s " % (name, table)
length = index.dialect_options[self.dialect.name]["length"]
if length is not None:
if isinstance(length, dict):
# length value can be a (column_name --> integer value)
# mapping specifying the prefix length for each column of the
# index
columns = ", ".join(
"%s(%d)" % (expr, length[col.name])
if col.name in length
else (
"%s(%d)" % (expr, length[expr])
if expr in length
else "%s" % expr
)
for col, expr in zip(index.expressions, columns)
)
else:
# or can be an integer value specifying the same
# prefix length for all columns of the index
columns = ", ".join(
"%s(%d)" % (col, length) for col in columns
)
else:
columns = ", ".join(columns)
text += "(%s)" % columns
parser = index.dialect_options["mysql"]["with_parser"]
if parser is not None:
text += " WITH PARSER %s" % (parser,)
using = index.dialect_options["mysql"]["using"]
if using is not None:
text += " USING %s" % (preparer.quote(using))
return text
def visit_primary_key_constraint(self, constraint):
text = super(MySQLDDLCompiler, self).visit_primary_key_constraint(
constraint
)
using = constraint.dialect_options["mysql"]["using"]
if using:
text += " USING %s" % (self.preparer.quote(using))
return text
def visit_drop_index(self, drop):
index = drop.element
text = "\nDROP INDEX "
if drop.if_exists:
text += "IF EXISTS "
return text + "%s ON %s" % (
self._prepared_index_name(index, include_schema=False),
self.preparer.format_table(index.table),
)
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.CheckConstraint):
if self.dialect.is_mariadb:
qual = "CONSTRAINT "
else:
qual = "CHECK "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % (
self.preparer.format_table(constraint.table),
qual,
const,
)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
"MySQL ignores the 'MATCH' keyword while at the same time "
"causes ON UPDATE/ON DELETE clauses to be ignored."
)
return ""
def visit_set_table_comment(self, create):
return "ALTER TABLE %s COMMENT %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, create):
return "ALTER TABLE %s COMMENT ''" % (
self.preparer.format_table(create.element)
)
def visit_set_column_comment(self, create):
return "ALTER TABLE %s CHANGE %s %s" % (
self.preparer.format_table(create.element.table),
self.preparer.format_column(create.element),
self.get_column_specification(create.element),
)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += " UNSIGNED"
if type_.zerofill:
spec += " ZEROFILL"
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr("charset"):
charset = "CHARACTER SET %s" % attr("charset")
elif attr("ascii"):
charset = "ASCII"
elif attr("unicode"):
charset = "UNICODE"
else:
charset = None
if attr("collation"):
collation = "COLLATE %s" % type_.collation
elif attr("binary"):
collation = "BINARY"
else:
collation = None
if attr("national"):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return " ".join(
[c for c in ("NATIONAL", spec, collation) if c is not None]
)
return " ".join(
[c for c in (spec, charset, collation) if c is not None]
)
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DOUBLE(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"DOUBLE(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "DOUBLE")
def visit_REAL(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"REAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "REAL")
def visit_FLOAT(self, type_, **kw):
if (
self._mysql_type(type_)
and type_.scale is not None
and type_.precision is not None
):
return self._extend_numeric(
type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)
)
elif type_.precision is not None:
return self._extend_numeric(
type_, "FLOAT(%s)" % (type_.precision,)
)
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"INTEGER(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"BIGINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"MEDIUMINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "TINYINT(%s)" % type_.display_width
)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"SMALLINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_, **kw):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
def visit_YEAR(self, type_, **kw):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_CHAR(self, type_, **kw):
if type_.length:
return self._extend_string(
type_, {}, "CHAR(%(length)s)" % {"length": type_.length}
)
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_, **kw):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"VARCHAR(%(length)s)" % {"length": type_.length},
)
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_NCHAR(self, type_, **kw):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"CHAR(%(length)s)" % {"length": type_.length},
)
else:
return self._extend_string(type_, {"national": True}, "CHAR")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_enum(self, type_, **kw):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_BLOB(self, type_, **kw):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_, **kw):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_, **kw):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_, **kw):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
quoted_enums = []
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(
type_, {}, "%s(%s)" % (name, ",".join(quoted_enums))
)
def visit_ENUM(self, type_, **kw):
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_SET(self, type_, **kw):
return self._visit_enumerated_values("SET", type_, type_.values)
def visit_BOOLEAN(self, type_, **kw):
return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS_MYSQL
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect, initial_quote=quote, escape_quote=quote
)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
class MariaDBIdentifierPreparer(MySQLIdentifierPreparer):
reserved_words = RESERVED_WORDS_MARIADB
@log.class_logger
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect.
Not used directly in application code.
"""
name = "mysql"
supports_statement_cache = True
supports_alter = True
# MySQL has no true "boolean" type; we
# allow for the "true" and "false" keywords, however
supports_native_boolean = False
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
max_constraint_name_length = 64
supports_native_enum = True
supports_sequences = False # default for MySQL ...
# ... may be updated to True for MariaDB 10.3+ in initialize()
sequences_optional = False
supports_for_update_of = False # default for MySQL ...
# ... may be updated to True for MySQL 8+ in initialize()
# MySQL doesn't support "DEFAULT VALUES" but *does* support
# "VALUES (DEFAULT)"
supports_default_values = False
supports_default_metavalue = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_multivalues_insert = True
supports_comments = True
inline_comments = True
default_paramstyle = "format"
colspecs = colspecs
cte_follows_insert = True
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
is_mariadb = False
_mariadb_normalized_version_info = None
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
construct_arguments = [
(sa_schema.Table, {"*": None}),
(sql.Update, {"limit": None}),
(sa_schema.PrimaryKeyConstraint, {"using": None}),
(
sa_schema.Index,
{
"using": None,
"length": None,
"prefix": None,
"with_parser": None,
},
),
]
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
is_mariadb=None,
**kwargs
):
kwargs.pop("use_ansiquotes", None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
self._set_mariadb(is_mariadb, None)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
# adjust for ConnectionFairy being present
# allows attribute set e.g. "connection.autocommit = True"
# to work properly
if hasattr(connection, "dbapi_connection"):
connection = connection.dbapi_connection
self._set_isolation_level(connection, level)
def _set_isolation_level(self, connection, level):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
if self._is_mysql and self.server_version_info >= (5, 7, 20):
cursor.execute("SELECT @@transaction_isolation")
else:
cursor.execute("SELECT @@tx_isolation")
row = cursor.fetchone()
if row is None:
util.warn(
"Could not retrieve transaction isolation level for MySQL "
"connection."
)
raise NotImplementedError()
val = row[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return val.upper().replace("-", " ")
@classmethod
def _is_mariadb_from_url(cls, url):
dbapi = cls.dbapi()
dialect = cls(dbapi=dbapi)
cargs, cparams = dialect.create_connect_args(url)
conn = dialect.connect(*cargs, **cparams)
try:
cursor = conn.cursor()
cursor.execute("SELECT VERSION() LIKE '%MariaDB%'")
val = cursor.fetchone()[0]
except:
raise
else:
return bool(val)
finally:
conn.close()
def _get_server_version_info(self, connection):
# get database server version info explicitly over the wire
# to avoid proxy servers like MaxScale getting in the
# way with their own values, see #4205
dbapi_con = connection.connection
cursor = dbapi_con.cursor()
cursor.execute("SELECT VERSION()")
val = cursor.fetchone()[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return self._parse_server_version(val)
def _parse_server_version(self, val):
version = []
is_mariadb = False
r = re.compile(r"[.\-+]")
tokens = r.split(val)
for token in tokens:
parsed_token = re.match(
r"^(?:(\d+)(?:a|b|c)?|(MariaDB\w*))$", token
)
if not parsed_token:
continue
elif parsed_token.group(2):
self._mariadb_normalized_version_info = tuple(version[-3:])
is_mariadb = True
else:
digit = int(parsed_token.group(1))
version.append(digit)
server_version_info = tuple(version)
self._set_mariadb(server_version_info and is_mariadb, val)
if not is_mariadb:
self._mariadb_normalized_version_info = server_version_info
if server_version_info < (5, 0, 2):
raise NotImplementedError(
"the MySQL/MariaDB dialect supports server "
"version info 5.0.2 and above."
)
# setting it here to help w the test suite
self.server_version_info = server_version_info
return server_version_info
def _set_mariadb(self, is_mariadb, server_version_info):
if is_mariadb is None:
return
if not is_mariadb and self.is_mariadb:
raise exc.InvalidRequestError(
"MySQL version %s is not a MariaDB variant."
% (server_version_info,)
)
if is_mariadb:
self.preparer = MariaDBIdentifierPreparer
# this would have been set by the default dialect already,
# so set it again
self.identifier_preparer = self.preparer(self)
self.is_mariadb = is_mariadb
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), dict(xid=xid))
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), dict(xid=xid))
connection.execute(sql.text("XA PREPARE :xid"), dict(xid=xid))
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), dict(xid=xid))
connection.execute(sql.text("XA ROLLBACK :xid"), dict(xid=xid))
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), dict(xid=xid))
def do_recover_twophase(self, connection):
resultset = connection.exec_driver_sql("XA RECOVER")
return [row["data"][0 : row["gtrid_length"]] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(
e,
(
self.dbapi.OperationalError,
self.dbapi.ProgrammingError,
self.dbapi.InterfaceError,
),
) and self._extract_error_code(e) in (
1927,
2006,
2013,
2014,
2045,
2055,
4031,
):
return True
elif isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.InternalError)
):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver
inconsistencies."""
return [_DecodingRow(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.fetchone()
if row:
return _DecodingRow(row, charset)
else:
return None
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.first()
if row:
return _DecodingRow(row, charset)
else:
return None
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.exec_driver_sql("SELECT DATABASE()").scalar()
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
if schema is None:
schema = self.default_schema_name
rs = connection.execute(
text(
"SELECT COUNT(*) FROM information_schema.tables WHERE "
"table_schema = :table_schema AND "
"table_name = :table_name"
).bindparams(
sql.bindparam("table_schema", type_=Unicode),
sql.bindparam("table_name", type_=Unicode),
),
{
"table_schema": util.text_type(schema),
"table_name": util.text_type(table_name),
},
)
return bool(rs.scalar())
def has_sequence(self, connection, sequence_name, schema=None):
if not self.supports_sequences:
self._sequences_not_supported()
if not schema:
schema = self.default_schema_name
# MariaDB implements sequences as a special type of table
#
cursor = connection.execute(
sql.text(
"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_TYPE='SEQUENCE' and TABLE_NAME=:name AND "
"TABLE_SCHEMA=:schema_name"
),
dict(
name=util.text_type(sequence_name),
schema_name=util.text_type(schema),
),
)
return cursor.first() is not None
def _sequences_not_supported(self):
raise NotImplementedError(
"Sequences are supported only by the "
"MariaDB series 10.3 or greater"
)
@reflection.cache
def get_sequence_names(self, connection, schema=None, **kw):
if not self.supports_sequences:
self._sequences_not_supported()
if not schema:
schema = self.default_schema_name
# MariaDB implements sequences as a special type of table
cursor = connection.execute(
sql.text(
"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_TYPE='SEQUENCE' and TABLE_SCHEMA=:schema_name"
),
dict(schema_name=schema),
)
return [
row[0]
for row in self._compat_fetchall(
cursor, charset=self._connection_charset
)
]
def initialize(self, connection):
# this is driver-based, does not need server version info
# and is fairly critical for even basic SQL operations
self._connection_charset = self._detect_charset(connection)
# call super().initialize() because we need to have
# server_version_info set up. in 1.4 under python 2 only this does the
# "check unicode returns" thing, which is the one area that some
# SQL gets compiled within initialize() currently
default.DefaultDialect.initialize(self, connection)
self._detect_sql_mode(connection)
self._detect_ansiquotes(connection) # depends on sql mode
self._detect_casing(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(
self, server_ansiquotes=self._server_ansiquotes
)
self.supports_sequences = (
self.is_mariadb and self.server_version_info >= (10, 3)
)
self.supports_for_update_of = (
self._is_mysql and self.server_version_info >= (8,)
)
self._needs_correct_for_88718_96365 = (
not self.is_mariadb and self.server_version_info >= (8,)
)
self._warn_for_known_db_issues()
def _warn_for_known_db_issues(self):
if self.is_mariadb:
mdb_version = self._mariadb_normalized_version_info
if mdb_version > (10, 2) and mdb_version < (10, 2, 9):
util.warn(
"MariaDB %r before 10.2.9 has known issues regarding "
"CHECK constraints, which impact handling of NULL values "
"with SQLAlchemy's boolean datatype (MDEV-13596). An "
"additional issue prevents proper migrations of columns "
"with CHECK constraints (MDEV-11114). Please upgrade to "
"MariaDB 10.2.9 or greater, or use the MariaDB 10.1 "
"series, to avoid these issues." % (mdb_version,)
)
@property
def _support_float_cast(self):
if not self.server_version_info:
return False
elif self.is_mariadb:
# ref https://mariadb.com/kb/en/mariadb-1045-release-notes/
return self.server_version_info >= (10, 4, 5)
else:
# ref https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-17.html#mysqld-8-0-17-feature # noqa
return self.server_version_info >= (8, 0, 17)
@property
def _is_mariadb(self):
return self.is_mariadb
@property
def _is_mysql(self):
return not self.is_mariadb
@property
def _is_mariadb_102(self):
return self.is_mariadb and self._mariadb_normalized_version_info > (
10,
2,
)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.exec_driver_sql("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
rp = connection.exec_driver_sql(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] == "BASE TABLE"
]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
charset = self._connection_charset
rp = connection.exec_driver_sql(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] in ("VIEW", "SYSTEM VIEW")
]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
for key in parsed_state.keys:
if key["type"] == "PRIMARY":
# There can be only one.
cols = [s[0] for s in key["columns"]]
return {"constrained_columns": cols, "name": None}
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
default_schema = None
fkeys = []
for spec in parsed_state.fk_constraints:
ref_name = spec["table"][-1]
ref_schema = len(spec["table"]) > 1 and spec["table"][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec["local"]
ref_names = spec["foreign"]
con_kw = {}
for opt in ("onupdate", "ondelete"):
if spec.get(opt, False) not in ("NO ACTION", None):
con_kw[opt] = spec[opt]
fkey_d = {
"name": spec["name"],
"constrained_columns": loc_names,
"referred_schema": ref_schema,
"referred_table": ref_name,
"referred_columns": ref_names,
"options": con_kw,
}
fkeys.append(fkey_d)
if self._needs_correct_for_88718_96365:
self._correct_for_mysql_bugs_88718_96365(fkeys, connection)
return fkeys
def _correct_for_mysql_bugs_88718_96365(self, fkeys, connection):
# Foreign key is always in lower case (MySQL 8.0)
# https://bugs.mysql.com/bug.php?id=88718
# issue #4344 for SQLAlchemy
# table name also for MySQL 8.0
# https://bugs.mysql.com/bug.php?id=96365
# issue #4751 for SQLAlchemy
# for lower_case_table_names=2, information_schema.columns
# preserves the original table/schema casing, but SHOW CREATE
# TABLE does not. this problem is not in lower_case_table_names=1,
# but use case-insensitive matching for these two modes in any case.
if self._casing in (1, 2):
def lower(s):
return s.lower()
else:
# if on case sensitive, there can be two tables referenced
# with the same name different casing, so we need to use
# case-sensitive matching.
def lower(s):
return s
default_schema_name = connection.dialect.default_schema_name
col_tuples = [
(
lower(rec["referred_schema"] or default_schema_name),
lower(rec["referred_table"]),
col_name,
)
for rec in fkeys
for col_name in rec["referred_columns"]
]
if col_tuples:
correct_for_wrong_fk_case = connection.execute(
sql.text(
"""
select table_schema, table_name, column_name
from information_schema.columns
where (table_schema, table_name, lower(column_name)) in
:table_data;
"""
).bindparams(sql.bindparam("table_data", expanding=True)),
dict(table_data=col_tuples),
)
# in casing=0, table name and schema name come back in their
# exact case.
# in casing=1, table name and schema name come back in lower
# case.
# in casing=2, table name and schema name come back from the
# information_schema.columns view in the case
# that was used in CREATE DATABASE and CREATE TABLE, but
# SHOW CREATE TABLE converts them to *lower case*, therefore
# not matching. So for this case, case-insensitive lookup
# is necessary
d = defaultdict(dict)
for schema, tname, cname in correct_for_wrong_fk_case:
d[(lower(schema), lower(tname))]["SCHEMANAME"] = schema
d[(lower(schema), lower(tname))]["TABLENAME"] = tname
d[(lower(schema), lower(tname))][cname.lower()] = cname
for fkey in fkeys:
rec = d[
(
lower(fkey["referred_schema"] or default_schema_name),
lower(fkey["referred_table"]),
)
]
fkey["referred_table"] = rec["TABLENAME"]
if fkey["referred_schema"] is not None:
fkey["referred_schema"] = rec["SCHEMANAME"]
fkey["referred_columns"] = [
rec[col.lower()] for col in fkey["referred_columns"]
]
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{"name": spec["name"], "sqltext": spec["sqltext"]}
for spec in parsed_state.ck_constraints
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return {
"text": parsed_state.table_options.get(
"%s_comment" % self.name, None
)
}
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
indexes = []
for spec in parsed_state.keys:
dialect_options = {}
unique = False
flavor = spec["type"]
if flavor == "PRIMARY":
continue
if flavor == "UNIQUE":
unique = True
elif flavor in ("FULLTEXT", "SPATIAL"):
dialect_options["%s_prefix" % self.name] = flavor
elif flavor is None:
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY", flavor
)
pass
if spec["parser"]:
dialect_options["%s_with_parser" % (self.name)] = spec[
"parser"
]
index_d = {}
index_d["name"] = spec["name"]
index_d["column_names"] = [s[0] for s in spec["columns"]]
mysql_length = {
s[0]: s[1] for s in spec["columns"] if s[1] is not None
}
if mysql_length:
dialect_options["%s_length" % self.name] = mysql_length
index_d["unique"] = unique
if flavor:
index_d["type"] = flavor
if dialect_options:
index_d["dialect_options"] = dialect_options
indexes.append(index_d)
return indexes
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{
"name": key["name"],
"column_names": [col[0] for col in key["columns"]],
"duplicates_index": key["name"],
}
for key in parsed_state.keys
if key["type"] == "UNIQUE"
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(schema, view_name)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
return sql
def _parsed_state_or_create(
self, connection, table_name, schema=None, **kw
):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get("info_cache", None),
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
preparer = self.identifier_preparer
return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
if parser._check_view(sql):
# Adapt views to something table-like.
columns = self._describe_table(
connection, None, charset, full_name=full_name
)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _fetch_setting(self, connection, setting_name):
charset = self._connection_charset
if self.server_version_info and self.server_version_info < (5, 6):
sql = "SHOW VARIABLES LIKE '%s'" % setting_name
fetch_col = 1
else:
sql = "SELECT @@%s" % setting_name
fetch_col = 0
show_var = connection.exec_driver_sql(sql)
row = self._compat_first(show_var, charset=charset)
if not row:
return None
else:
return row[fetch_col]
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html
setting = self._fetch_setting(connection, "lower_case_table_names")
if setting is None:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if setting == "OFF":
cs = 0
elif setting == "ON":
cs = 1
else:
cs = int(setting)
self._casing = cs
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
charset = self._connection_charset
rs = connection.exec_driver_sql("SHOW COLLATION")
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_sql_mode(self, connection):
setting = self._fetch_setting(connection, "sql_mode")
if setting is None:
util.warn(
"Could not retrieve SQL_MODE; please ensure the "
"MySQL user has permissions to SHOW VARIABLES"
)
self._sql_mode = ""
else:
self._sql_mode = setting or ""
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
mode = self._sql_mode
if not mode:
mode = ""
elif mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and "ANSI_QUOTES" or ""
self._server_ansiquotes = "ANSI_QUOTES" in mode
# as of MySQL 5.0.1
self._backslash_escapes = "NO_BACKSLASH_ESCAPES" not in mode
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execution_options(
skip_user_error_events=True
).exec_driver_sql(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
util.raise_(exc.NoSuchTableError(full_name), replace_context=e)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None, full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execution_options(
skip_user_error_events=True
).exec_driver_sql(st)
except exc.DBAPIError as e:
code = self._extract_error_code(e.orig)
if code == 1146:
util.raise_(
exc.NoSuchTableError(full_name), replace_context=e
)
elif code == 1356:
util.raise_(
exc.UnreflectableTableError(
"Table or view named %s could not be "
"reflected: %s" % (full_name, e)
),
replace_context=e,
)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _DecodingRow(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
_encoding_compat = {
"koi8r": "koi8_r",
"koi8u": "koi8_u",
"utf16": "utf-16-be", # MySQL's uft16 is always bigendian
"utf8mb4": "utf8", # real utf8
"utf8mb3": "utf8", # real utf8; saw this happen on CI but I cannot
# reproduce, possibly mariadb10.6 related
"eucjpms": "ujis",
}
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
| [
"[email protected]"
] | |
829d8ceb31ec21a8324a4ee14faa7bf5ad47e755 | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/events/papers/controllers/base.py | ae72c3fe163c825494ededfcc8484349b10fe67e | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py |
from __future__ import unicode_literals
from flask import request, session
from werkzeug.exceptions import Forbidden, NotFound
from fossir.modules.events.contributions.models.contributions import Contribution
from fossir.modules.events.controllers.base import RHDisplayEventBase
from fossir.modules.events.management.controllers.base import ManageEventMixin
from fossir.modules.events.util import check_event_locked
class RHPapersBase(RHDisplayEventBase):
"""Base class for all paper-related RHs"""
EVENT_FEATURE = 'papers'
def _check_access(self):
RHDisplayEventBase._check_access(self)
# Only let managers access the management versions.
if self.management and not self.event.cfp.is_manager(session.user):
raise Forbidden
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', False)
class RHManagePapersBase(ManageEventMixin, RHPapersBase):
"""
Base class for all paper-related RHs that require full event
management permissions
"""
ROLE = 'paper_manager'
DENY_FRAMES = True
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', True)
class RHJudgingAreaBase(RHPapersBase):
"""Base class for all paper-related RHs only available to judges/managers"""
def _check_access(self):
RHPapersBase._check_access(self)
if not session.user or not self.event.cfp.can_access_judging_area(session.user):
raise Forbidden
check_event_locked(self, self.event)
class RHPaperBase(RHPapersBase):
PAPER_REQUIRED = True
normalize_url_spec = {
'locators': {
lambda self: self.contribution
}
}
def _process_args(self):
RHPapersBase._process_args(self)
self.contribution = Contribution.get_one(request.view_args['contrib_id'], is_deleted=False)
self.paper = self.contribution.paper
if self.paper is None and self.PAPER_REQUIRED:
raise NotFound
def _check_access(self):
RHPapersBase._check_access(self)
if not self._check_paper_protection():
raise Forbidden
check_event_locked(self, self.event)
def _check_paper_protection(self):
"""Perform a permission check on the current paper.
Override this in case you want to check for more specific
privileges than the generic "can access".
"""
return self.contribution.can_access(session.user)
| [
"[email protected]"
] | |
d89c6d607ae28029364a25d266750f8ce316d329 | e718d3ccc181a72e7bfe0faad42285f2829a96e5 | /GDP_projectOriginal/service/urls.py | a21fadcfe3bedc40bf12129e1924b069fbbf87aa | [] | no_license | Jerrykim91/GDP_project | 8a17e75276ee92c93ad621163bffa57a528c258f | cd8e626cc3f01c1051f13115ad3d6217dd99ddc6 | refs/heads/master | 2022-05-02T01:08:33.955292 | 2021-03-25T14:43:21 | 2021-03-25T14:43:21 | 234,493,053 | 0 | 0 | null | 2022-04-22T23:27:15 | 2020-01-17T07:16:07 | Jupyter Notebook | UTF-8 | Python | false | false | 651 | py | # service\urls.py
# import
from django.urls import path
from . import views
urlpatterns = [
# GDP_graph
path('search_main', views.search_main, name="search_main"),
path('search_detail', views.search_detail, name="search_detail"),
path('search_show', views.search_show, name="search_show"),
path('sort_by_year', views.sort_by_year, name="sort_by_year"),
path('search_country', views.search_country, name="search_country"),
path('search_country_graph', views.search_country_graph, name="search_country_graph"),
path('search_country_graph_pop', views.search_country_graph_pop, name="search_country_graph_pop")
]
| [
"[email protected]"
] | |
84f719db77d5ee7f722e08c95dd6bff85761425f | 25e989e986522cf91365a6cc51e3c68b3d29351b | /databases/migrations/2018_06_26_165322_add_is_active_to_users_table.py | e16a431117d57b3dbff7c92a8a01d0b377b0c016 | [
"MIT"
] | permissive | josephmancuso/gbaleague-masonite2 | ff7a3865927705649deea07f68d89829b2132d31 | b3dd5ec3f20c07eaabcc3129b0c50379a946a82b | refs/heads/master | 2022-05-06T10:47:21.809432 | 2019-03-31T22:01:04 | 2019-03-31T22:01:04 | 136,680,885 | 0 | 1 | MIT | 2022-03-21T22:16:43 | 2018-06-09T01:33:01 | Python | UTF-8 | Python | false | false | 398 | py | from orator.migrations import Migration
class AddIsActiveToUsersTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.table('users') as table:
table.integer('is_active').nullable()
def down(self):
"""
Revert the migrations.
"""
with self.schema.table('users') as table:
pass
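            # Editorial note: down() is currently a no-op, so this migration is
            # not reversible; a reversible version would drop the column added
            # in up(), e.g. table.drop_column('is_active') (hypothetical call).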
| [
"[email protected]"
] | |
240b036586fb7f56681f4b847e5c7f238329931a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/plantagenet.py | 2477aa615648c2fa4fd3b2f52656b0e68052b2f4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 177 | py | ii = [('TennAP.py', 1), ('ClarGE2.py', 1), ('ClarGE.py', 1), ('LandWPA2.py', 1), ('WadeJEB.py', 2), ('SoutRD2.py', 1), ('MereHHB2.py', 2), ('SadlMLP2.py', 1), ('BeckWRE.py', 1)] | [
"[email protected]"
] | |
6bfa7de91c5b3465d54ffa63e86ba56adc35cf78 | ba0731b2dbc4c1529eaaa79811ec15754c19b4cd | /references/domain.py | 9d612aaa40fa48d9746f598c63c465aedf4bba37 | [
"MIT"
] | permissive | arXiv/arxiv-references | 35f87084cf91947c572faf1a86f119b308fada66 | a755aeaa864ff807ff16ae2c3960f9fee54d8dd8 | refs/heads/master | 2022-12-21T02:34:57.166298 | 2018-05-04T20:30:48 | 2018-05-04T20:30:48 | 94,906,433 | 8 | 6 | MIT | 2022-12-08T02:06:20 | 2017-06-20T15:26:25 | Python | UTF-8 | Python | false | false | 3,178 | py | """Core data structures in the references application."""
from typing import List, Optional
from datetime import datetime
from base64 import b64encode
from dataclasses import dataclass, field, asdict
from unidecode import unidecode
@dataclass
class Author:
"""A parsed author name in a bibliographic reference."""
surname: str = field(default_factory=str)
givennames: str = field(default_factory=str)
prefix: str = field(default_factory=str)
suffix: str = field(default_factory=str)
fullname: str = field(default_factory=str)
@dataclass
class Identifier:
"""A persistent identifier for a cited reference."""
    identifier_type: str
"""E.g. ISBN, ISSN, URI."""
identifier: str
@dataclass
class Reference:
"""An instance of a parsed bibliographic reference."""
title: Optional[str] = field(default=None)
"""The title of the reference."""
raw: str = field(default_factory=str)
"""The un-parsed reference string."""
arxiv_id: Optional[str] = field(default=None)
"""arXiv paper ID."""
authors: List[Author] = field(default_factory=list)
reftype: str = field(default='article')
"""The type of work to which the reference refers."""
doi: Optional[str] = field(default=None)
volume: Optional[str] = field(default=None)
issue: Optional[str] = field(default=None)
pages: Optional[str] = field(default=None)
source: Optional[str] = field(default=None)
"""Journal, conference, etc."""
year: Optional[str] = field(default=None)
identifiers: List[Identifier] = field(default_factory=list)
identifier: str = field(default_factory=str)
"""Unique identifier for this extracted reference."""
score: float = field(default=0.)
def __post_init__(self) -> None:
"""Set the identifier based on reference content."""
hash_string = bytes(unidecode(self.raw), encoding='ascii')
self.identifier = str(b64encode(hash_string), encoding='utf-8')[:100]
def to_dict(self) -> dict:
"""Return a dict representation of this object."""
return {k: v for k, v in asdict(self).items() if v is not None}
@dataclass
class ReferenceSet:
"""A collection of :class:`.Reference`."""
document_id: str
"""arXiv paper ID (with version affix)."""
references: List[Reference]
version: str
"""Version of this application."""
score: float
"""In the range 0-1; relative quality of the set as a whole."""
created: datetime
updated: datetime
extractor: str = 'combined'
"""
Name of the extractor used.
Default is combined (for reconciled reference set). May also be 'author',
for the author-curated set.
"""
extractors: List[str] = field(default_factory=list)
"""Extractors used to generate this reference set."""
raw: bool = field(default=False)
"""If True, refs are from a single extractor before reconciliation."""
def to_dict(self) -> dict:
"""Generate a dict representation of this object."""
data: dict = asdict(self)
data['created'] = self.created.isoformat()
data['updated'] = self.updated.isoformat()
return data
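# Editorial usage sketch (not part of the original module; the values below are
# invented): build a parsed reference and serialize it the way the API layer would.
if __name__ == '__main__':
    _author = Author(surname='Doe', givennames='J.', fullname='J. Doe')
    _ref = Reference(title='An example title', raw='J. Doe, An example title, 2017.',
                     authors=[_author], year='2017')
    _refset = ReferenceSet(document_id='1234.56789v1', references=[_ref],
                           version='0.1', score=0.9,
                           created=datetime.now(), updated=datetime.now())
    print(_refset.to_dict())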
| [
"[email protected]"
] | |
7b66b925a73790e4aaf1a9068d9c96e34ae8985d | 2d048e630f8d9c546860820ef27700c1020b44cd | /th.py | 5ae01e2c7426d7eb7c21903856965982279d5aca | [] | no_license | 0h-n0/tch-mnist-simple-benchmak | 60ce3b6500f161f9a768e965d82eadf50a1e051f | c2f07661b64c83de82ad760b4a7b20b601a7129b | refs/heads/master | 2020-12-31T22:36:26.946751 | 2020-02-08T02:40:40 | 2020-02-08T02:40:40 | 239,057,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,243 | py | from __future__ import print_function
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
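# Editorial sketch (not in the original script): an optional smoke test of the
# tensor shapes flowing through Net.forward for MNIST-sized input; it is never
# called by the training code below.
def _check_net_shapes():
    model = Net()
    dummy = torch.randn(2, 1, 28, 28) # a batch of two fake 28x28 grayscale images
    out = model(dummy)
    # conv1 -> (2, 32, 26, 26), conv2 -> (2, 64, 24, 24), max_pool2d -> (2, 64, 12, 12),
    # flatten -> (2, 9216), fc1 -> (2, 128), fc2 + log_softmax -> (2, 10)
    assert out.shape == (2, 10)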
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            total += target.size(0) # count samples rather than batches
    test_loss /= total
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, total,
        100. * correct / total))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 256)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
                        help='number of epochs to train (default: 50)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
train_loader = [(data.to(device), target.to(device)) for (data, target) in train_loader]
test_loader = [(data.to(device), target.to(device)) for (data, target) in test_loader]
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
#torch.backends.cudnn.benchmark = True
times = []
for epoch in range(1, args.epochs + 1):
s = time.time()
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
scheduler.step()
times.append((time.time() - s))
print(f"{(time.time() - s)}s")
print("ave=>", torch.FloatTensor(times).mean())
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ed34089d75838c5200f1175d0b9aece8648523e1 | 36b75aac4236e928e22552e8812abd45d32aecf1 | /modules/dbnd-airflow/src/dbnd_airflow_contrib/utils/system_utils.py | 88d9452cd26e2ec091d604cdb48fdd92751efb1d | [
"Apache-2.0"
] | permissive | reloadbrain/dbnd | 7793aa1864f678005de626068b0ac9361d637d65 | ec0076f9a142b20e2f7afd886ed1a18683c553ec | refs/heads/master | 2023-09-01T08:04:09.486666 | 2021-10-14T16:43:00 | 2021-10-14T16:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | import logging
import re
import subprocess
logger = logging.getLogger(__name__)
def print_stack_trace(stack_frame):
try:
import traceback
traceback.print_stack(stack_frame)
except Exception as e:
logger.info("Could not print stack trace! Exception: %s", e)
def print_cpu_memory_usage():
try:
import psutil
cpu_usage_percent = psutil.cpu_percent(interval=1)
virtual_memory = psutil.virtual_memory()
last_minute_load, last_5_minute_load, last_15_minute_load = [
x / psutil.cpu_count() * 100 for x in psutil.getloadavg()
]
logger.info(
"""
Cpu usage %%: %s"
"Virtual memory: %s"
"Last minute cpu load %%: %s"
"Last 5 minute cpu load %%: %s"
"Last 15 minute cpu load %%: %s"
"""
% (
cpu_usage_percent,
virtual_memory,
last_minute_load,
last_5_minute_load,
last_15_minute_load,
)
)
except Exception as e:
logger.info("Could not read cpu and memory usage! Exception: %s", e)
def print_dmesg():
try:
human_dmesg()
except Exception as e:
logger.info("Could not get dmesg data! Exception: %s", e)
_datetime_format = "%Y-%m-%d %H:%M:%S"
_dmesg_line_regex = re.compile("^\[(?P<time>\d+\.\d+)\](?P<line>.*)$")
def human_dmesg():
from datetime import datetime, timedelta
now = datetime.now()
uptime_diff = None
with open("/proc/uptime") as f:
uptime_diff = f.read().strip().split()[0]
uptime = now - timedelta(
seconds=int(uptime_diff.split(".")[0]),
microseconds=int(uptime_diff.split(".")[1]),
)
dmesg_data = subprocess.check_output(["dmesg"]).decode()
for line in dmesg_data.split("\n"):
if not line:
continue
match = _dmesg_line_regex.match(line)
if match:
seconds = int(match.groupdict().get("time", "").split(".")[0])
nanoseconds = int(match.groupdict().get("time", "").split(".")[1])
microseconds = int(round(nanoseconds * 0.001))
line = match.groupdict().get("line", "")
t = uptime + timedelta(seconds=seconds, microseconds=microseconds)
logger.info("[%s]%s" % (t.strftime(_datetime_format), line))
| [
"[email protected]"
] | |
e5d148377f281451a04b29aa1ba61f66b2a2f021 | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /LongestCommonSubsequenceSP.py | bcfcc3820f481d704dc5053bddf126c12d6f8f44 | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | '''
1143. Longest Common Subsequence
Given two strings text1 and text2, return the length of their longest common subsequence. If there is no common subsequence, return 0.
A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters.
For example, "ace" is a subsequence of "abcde".
A common subsequence of two strings is a subsequence that is common to both strings.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: The longest common subsequence is "ace" and its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: The longest common subsequence is "abc" and its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: There is no such common subsequence, so the result is 0.
Constraints:
1 <= text1.length, text2.length <= 1000
text1 and text2 consist of only lowercase English characters.
'''
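# Editorial note (not part of the original solution): dp[row][col] below holds
# the LCS length of text1[row:] and text2[col:], filled bottom-up. On a character
# match the value extends the diagonal (dp[row + 1][col + 1] + 1); otherwise it
# takes the better of skipping one character from either string, so for
# text1 = "abcde" and text2 = "ace" the answer dp[0][0] is 3 ("ace").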
class Solution(object):
def longestCommonSubsequence(self, text1, text2):
"""
:type text1: str
:type text2: str
:rtype: int
"""
len1 = len(text1)
len2 = len(text2)
dp = []
for row in range(len1 + 1):
temp = []
for col in range(len2 + 1):
temp.append(0)
dp.append(temp)
for row in range(len1 - 1, -1, -1):
for col in range(len2 - 1, -1, -1):
if text1[row] == text2[col]:
dp[row][col] = dp[row + 1][col + 1] + 1
else:
dp[row][col] = max(dp[row + 1][col], dp[row][col + 1])
return dp[0][0] | [
"[email protected]"
] | |
d8604f174194d617ea021a36342e8bb56ed872f3 | 555cf712793ecf24ee32b3b8193b88f9380b6681 | /cybox-2.0.0b6/cybox/objects/x509_certificate_object.py | e7f038c63312e96356b96c60d55f2faf550603ce | [] | no_license | bopopescu/crits_dependencies | b198d45d75d0ce2f60867d26bd4fda232fa5b78a | 112e380b203069fe97e13a20bce1e926ffe18ba5 | refs/heads/master | 2022-11-21T15:01:42.761440 | 2014-06-02T22:18:38 | 2014-06-02T22:18:38 | 282,258,349 | 0 | 0 | null | 2020-07-24T15:43:44 | 2020-07-24T15:43:43 | null | UTF-8 | Python | false | false | 26,783 | py | # Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.utils as utils
import cybox.bindings.x509_certificate_object as x509_certificate_binding
from cybox.common import ObjectProperties, String, Integer, DateTime, NonNegativeInteger
class X509Certificate(ObjectProperties):
_binding = x509_certificate_binding
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
_XSI_NS = "X509CertificateObj"
_XSI_TYPE = "X509CertificateObjectType"
def __init__(self):
super(X509Certificate, self).__init__()
self.certificate = None
self.certificate_signature = None
def to_obj(self):
x509_certificate_obj = x509_certificate_binding.X509CertificateObjectType()
super(X509Certificate, self).to_obj(x509_certificate_obj)
if self.certificate is not None: x509_certificate_obj.set_Certificate(self.certificate.to_obj())
if self.certificate_signature is not None: x509_certificate_obj.set_Certificate_Signature(self.certificate_signature.to_obj())
return x509_certificate_obj
def to_dict(self):
x509_certificate_dict = {}
super(X509Certificate, self).to_dict(x509_certificate_dict)
if self.certificate is not None: x509_certificate_dict['certificate'] = self.certificate.to_dict()
if self.certificate_signature is not None: x509_certificate_dict['certificate_signature'] = self.certificate_signature.to_dict()
return x509_certificate_dict
@staticmethod
def from_dict(x509_certificate_dict):
if not x509_certificate_dict:
return None
x509_certificate_ = X509Certificate()
x509_certificate_.certificate = X509Cert.from_dict(x509_certificate_dict.get('certificate'))
x509_certificate_.certificate_signature = X509CertificateSignature.from_dict(x509_certificate_dict.get('certificate_signature'))
return x509_certificate_
@staticmethod
def from_obj(x509_certificate_obj):
if not x509_certificate_obj:
return None
x509_certificate_ = X509Certificate()
x509_certificate_.certificate = X509Cert.from_obj(x509_certificate_obj.get_Certificate())
x509_certificate_.certificate_signature = X509CertificateSignature.from_obj(x509_certificate_obj.get_Certificate_Signature())
return x509_certificate_
class X509Cert(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(X509Cert, self).__init__()
self.version = None
self.serial_number = None
self.signature_algorithm = None
self.issuer = None
self.validity = None
self.subject = None
self.subject_public_key = None
self.standard_extensions = None
self.non_standard_extensions = None
def to_obj(self):
x509_cert_obj = x509_certificate_binding.X509CertificateType()
if self.version is not None : x509_cert_obj.set_Version(self.version.to_obj())
if self.serial_number is not None : x509_cert_obj.set_Serial_Number(self.serial_number.to_obj())
if self.signature_algorithm is not None : x509_cert_obj.set_Signature_Algorithm(self.signature_algorithm.to_obj())
if self.issuer is not None : x509_cert_obj.set_Issuer(self.issuer.to_obj())
if self.validity is not None : x509_cert_obj.set_Validity(self.validity.to_obj())
if self.subject is not None : x509_cert_obj.set_Subject(self.subject.to_obj())
if self.subject_public_key is not None : x509_cert_obj.set_Subject_Public_Key(self.subject_public_key.to_obj())
if self.standard_extensions is not None : x509_cert_obj.set_Standard_Extensions(self.standard_extensions.to_obj())
if self.non_standard_extensions is not None : x509_cert_obj.set_Non_Standard_Extensions(self.non_standard_extensions.to_obj())
return x509_cert_obj
def to_dict(self):
x509_cert_dict = {}
if self.version is not None : x509_cert_dict['version'] = self.version.to_dict()
if self.serial_number is not None : x509_cert_dict['serial_number'] = self.serial_number.to_dict()
if self.signature_algorithm is not None : x509_cert_dict['signature_algorithm'] = self.signature_algorithm.to_dict()
if self.issuer is not None : x509_cert_dict['issuer'] = self.issuer.to_dict()
if self.validity is not None : x509_cert_dict['validity'] = self.validity.to_dict()
if self.subject is not None : x509_cert_dict['subject'] = self.subject.to_dict()
if self.subject_public_key is not None : x509_cert_dict['subject_public_key'] = self.subject_public_key.to_dict()
if self.standard_extensions is not None : x509_cert_dict['standard_extensions'] = self.standard_extensions.to_dict()
if self.non_standard_extensions is not None : x509_cert_dict['non_standard_extensions'] = self.non_standard_extensions.to_dict()
return x509_cert_dict
@staticmethod
def from_dict(x509_cert_dict):
if not x509_cert_dict:
return None
x509_cert_ = X509Cert()
x509_cert_.version = String.from_dict(x509_cert_dict.get('version'))
x509_cert_.serial_number = String.from_dict(x509_cert_dict.get('serial_number'))
x509_cert_.signature_algorithm = String.from_dict(x509_cert_dict.get('signature_algorithm'))
x509_cert_.issuer = String.from_dict(x509_cert_dict.get('issuer'))
x509_cert_.validity = Validity.from_dict(x509_cert_dict.get('validity'))
x509_cert_.subject = String.from_dict(x509_cert_dict.get('subject'))
x509_cert_.subject_public_key = SubjectPublicKey.from_dict(x509_cert_dict.get('subject_public_key'))
x509_cert_.standard_extensions = X509V3Extensions.from_dict(x509_cert_dict.get('standard_extensions'))
x509_cert_.non_standard_extensions = X509NonStandardExtensions.from_dict(x509_cert_dict.get('non_standard_extensions'))
return x509_cert_
@staticmethod
def from_obj(x509_cert_obj):
if not x509_cert_obj:
return None
x509_cert_ = X509Cert()
x509_cert_.version = String.from_obj(x509_cert_obj.get_Version())
x509_cert_.serial_number = String.from_obj(x509_cert_obj.get_Serial_Number())
x509_cert_.signature_algorithm = String.from_obj(x509_cert_obj.get_Signature_Algorithm())
x509_cert_.issuer = String.from_obj(x509_cert_obj.get_Issuer())
x509_cert_.validity = Validity.from_obj(x509_cert_obj.get_Validity())
x509_cert_.subject = String.from_obj(x509_cert_obj.get_Subject())
x509_cert_.subject_public_key = SubjectPublicKey.from_obj(x509_cert_obj.get_Subject_Public_Key())
x509_cert_.standard_extensions = X509V3Extensions.from_obj(x509_cert_obj.get_Standard_Extensions())
x509_cert_.non_standard_extensions = X509NonStandardExtensions.from_obj(x509_cert_obj.get_Non_Standard_Extensions())
return x509_cert_
class SubjectPublicKey(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(SubjectPublicKey, self).__init__()
self.public_key_algorithm = None
self.rsa_public_key = None
def to_obj(self):
subject_public_key_obj = x509_certificate_binding.SubjectPublicKeyType()
if self.public_key_algorithm is not None : subject_public_key_obj.set_Public_Key_Algorithm(self.public_key_algorithm.to_obj())
if self.rsa_public_key is not None : subject_public_key_obj.set_RSA_Public_Key(self.rsa_public_key.to_obj())
return subject_public_key_obj
def to_dict(self):
subject_public_key_dict = {}
if self.public_key_algorithm is not None : subject_public_key_dict['public_key_algorithm'] = self.public_key_algorithm.to_dict()
if self.rsa_public_key is not None : subject_public_key_dict['rsa_public_key'] = self.rsa_public_key.to_dict()
return subject_public_key_dict
@staticmethod
def from_dict(subject_public_key_dict):
if not subject_public_key_dict:
return None
subject_public_key_ = SubjectPublicKey()
subject_public_key_.public_key_algorithm = String.from_dict(subject_public_key_dict.get('public_key_algorithm'))
subject_public_key_.rsa_public_key = RSAPublicKey.from_dict(subject_public_key_dict.get('rsa_public_key'))
return subject_public_key_
@staticmethod
def from_obj(subject_public_key_obj):
if not subject_public_key_obj:
return None
subject_public_key_ = SubjectPublicKey()
subject_public_key_.public_key_algorithm = String.from_obj(subject_public_key_obj.get_Public_Key_Algorithm())
subject_public_key_.rsa_public_key = RSAPublicKey.from_obj(subject_public_key_obj.get_RSA_Public_Key())
return subject_public_key_
class RSAPublicKey(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(RSAPublicKey, self).__init__()
self.modulus = None
self.exponent = None
def to_obj(self):
rsa_public_key_obj = x509_certificate_binding.RSAPublicKeyType()
if self.modulus is not None : rsa_public_key_obj.set_Modulus(self.modulus.to_obj())
if self.exponent is not None : rsa_public_key_obj.set_Exponent(self.exponent.to_obj())
return rsa_public_key_obj
def to_dict(self):
rsa_public_key_dict = {}
        if self.modulus is not None : rsa_public_key_dict['modulus'] = self.modulus.to_dict()
        if self.exponent is not None : rsa_public_key_dict['exponent'] = self.exponent.to_dict()
return rsa_public_key_dict
@staticmethod
def from_dict(rsa_public_key_dict):
if not rsa_public_key_dict:
return None
rsa_public_key_ = RSAPublicKey()
rsa_public_key_.modulus = String.from_dict(rsa_public_key_dict.get('modulus'))
rsa_public_key_.exponent = Integer.from_dict(rsa_public_key_dict.get('exponent'))
return rsa_public_key_
@staticmethod
def from_obj(rsa_public_key_obj):
if not rsa_public_key_obj:
return None
rsa_public_key_ = RSAPublicKey()
rsa_public_key_.modulus = String.from_obj(rsa_public_key_obj.get_Modulus())
rsa_public_key_.exponent = Integer.from_obj(rsa_public_key_obj.get_Exponent())
return rsa_public_key_
class Validity(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(Validity, self).__init__()
self.not_before = None
self.not_after = None
def to_obj(self):
validity_obj = x509_certificate_binding.ValidityType()
if self.not_before is not None : validity_obj.set_Not_Before(self.not_before.to_obj())
if self.not_after is not None : validity_obj.set_Not_After(self.not_after.to_obj())
return validity_obj
def to_dict(self):
validity_dict = {}
if self.not_before is not None : validity_dict['not_before'] = self.not_before.to_dict()
if self.not_after is not None : validity_dict['not_after'] = self.not_after.to_dict()
        return validity_dict
@staticmethod
def from_dict(validity_dict):
if not validity_dict:
return None
validity_ = Validity()
validity_.not_after = DateTime.from_dict(validity_dict.get('not_after'))
validity_.not_before = DateTime.from_dict(validity_dict.get('not_before'))
return validity_
@staticmethod
def from_obj(validity_obj):
if not validity_obj:
return None
validity_ = Validity()
validity_.not_after = DateTime.from_obj(validity_obj.get_Not_After())
validity_.not_before = DateTime.from_obj(validity_obj.get_Not_Before())
return validity_
class X509V3Extensions(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(X509V3Extensions, self).__init__()
self.basic_constraints = None
self.name_constraints = None
self.policy_constraints = None
self.key_usage = None
self.extended_key_usage = None
self.subject_key_identifier = None
self.authority_key_identifier = None
self.subject_alternative_name = None
self.issuer_alternative_name = None
self.subject_directory_attributes = None
self.crl_distribution_points = None
self.inhibit_any_policy = None
self.private_key_usage_period = None
self.certificate_policies = None
self.policy_mappings = None
def to_obj(self):
x509_v3_extensions_obj = x509_certificate_binding.X509V3ExtensionsType()
if self.basic_constraints is not None : x509_v3_extensions_obj.set_Basic_Constraints(self.basic_constraints.to_obj())
if self.name_constraints is not None : x509_v3_extensions_obj.set_Name_Constraints(self.name_constraints.to_obj())
if self.policy_constraints is not None : x509_v3_extensions_obj.set_Policy_Constraints(self.policy_constraints.to_obj())
if self.key_usage is not None : x509_v3_extensions_obj.set_Key_Usage(self.key_usage.to_obj())
if self.extended_key_usage is not None : x509_v3_extensions_obj.set_Extended_Key_Usage(self.extended_key_usage.to_obj())
if self.subject_key_identifier is not None : x509_v3_extensions_obj.set_Subject_Key_Identifier(self.subject_key_identifier.to_obj())
if self.authority_key_identifier is not None : x509_v3_extensions_obj.set_Authority_Key_Identifier(self.authority_key_identifier.to_obj())
if self.subject_alternative_name is not None : x509_v3_extensions_obj.set_Subject_Alternative_Name(self.subject_alternative_name.to_obj())
if self.issuer_alternative_name is not None : x509_v3_extensions_obj.set_Issuer_Alternative_Name(self.issuer_alternative_name.to_obj())
if self.subject_directory_attributes is not None : x509_v3_extensions_obj.set_Subject_Directory_Attributes(self.subject_directory_attributes.to_obj())
if self.crl_distribution_points is not None : x509_v3_extensions_obj.set_CRL_Distribution_Points(self.crl_distribution_points.to_obj())
if self.inhibit_any_policy is not None : x509_v3_extensions_obj.set_Inhibit_Any_Policy(self.inhibit_any_policy.to_obj())
if self.private_key_usage_period is not None : x509_v3_extensions_obj.set_Private_Key_Usage_Period(self.private_key_usage_period.to_obj())
if self.certificate_policies is not None : x509_v3_extensions_obj.set_Certificate_Policies(self.certificate_policies.to_obj())
if self.policy_mappings is not None : x509_v3_extensions_obj.set_Policy_Mappings(self.policy_mappings.to_obj())
return x509_v3_extensions_obj
def to_dict(self):
x509_v3_extensions_dict = {}
if self.basic_constraints is not None : x509_v3_extensions_dict['basic_constraints'] = self.basic_constraints.to_dict()
if self.name_constraints is not None : x509_v3_extensions_dict['name_constraints'] = self.name_constraints.to_dict()
if self.policy_constraints is not None : x509_v3_extensions_dict['policy_constraints'] = self.policy_constraints.to_dict()
if self.key_usage is not None : x509_v3_extensions_dict['key_usage'] = self.key_usage.to_dict()
if self.extended_key_usage is not None : x509_v3_extensions_dict['extended_key_usage'] = self.extended_key_usage.to_dict()
if self.subject_key_identifier is not None : x509_v3_extensions_dict['subject_key_identifier'] = self.subject_key_identifier.to_dict()
if self.authority_key_identifier is not None : x509_v3_extensions_dict['authority_key_identifier'] = self.authority_key_identifier.to_dict()
if self.subject_alternative_name is not None : x509_v3_extensions_dict['subject_alternative_name'] = self.subject_alternative_name.to_dict()
if self.issuer_alternative_name is not None : x509_v3_extensions_dict['issuer_alternative_name'] = self.issuer_alternative_name.to_dict()
if self.subject_directory_attributes is not None : x509_v3_extensions_dict['subject_directory_attributes'] = self.subject_directory_attributes.to_dict()
if self.crl_distribution_points is not None : x509_v3_extensions_dict['crl_distribution_points'] = self.crl_distribution_points.to_dict()
if self.inhibit_any_policy is not None : x509_v3_extensions_dict['inhibit_any_policy'] = self.inhibit_any_policy.to_dict()
if self.private_key_usage_period is not None : x509_v3_extensions_dict['private_key_usage_period'] = self.private_key_usage_period.to_dict()
if self.certificate_policies is not None : x509_v3_extensions_dict['certificate_policies'] = self.certificate_policies.to_dict()
if self.policy_mappings is not None :x509_v3_extensions_dict['policy_mappings'] = self.policy_mappings.to_dict()
return x509_v3_extensions_dict
@staticmethod
def from_dict(x509_v3_extensions_dict):
if not x509_v3_extensions_dict:
return None
x509_v3_extensions_ = X509V3Extensions()
x509_v3_extensions_.basic_constraints = String.from_dict(x509_v3_extensions_dict.get('basic_constraints'))
x509_v3_extensions_.name_constraints = String.from_dict(x509_v3_extensions_dict.get('name_constraints'))
x509_v3_extensions_.policy_constraints = String.from_dict(x509_v3_extensions_dict.get('policy_constraints'))
x509_v3_extensions_.key_usage = String.from_dict(x509_v3_extensions_dict.get('key_usage'))
x509_v3_extensions_.extended_key_usage = String.from_dict(x509_v3_extensions_dict.get('extended_key_usage'))
x509_v3_extensions_.subject_key_identifier = String.from_dict(x509_v3_extensions_dict.get('subject_key_identifier'))
x509_v3_extensions_.authority_key_identifier = String.from_dict(x509_v3_extensions_dict.get('authority_key_identifier'))
x509_v3_extensions_.subject_alternative_name = String.from_dict(x509_v3_extensions_dict.get('subject_alternative_name'))
x509_v3_extensions_.issuer_alternative_name = String.from_dict(x509_v3_extensions_dict.get('issuer_alternative_name'))
x509_v3_extensions_.subject_directory_attributes = String.from_dict(x509_v3_extensions_dict.get('subject_directory_attributes'))
x509_v3_extensions_.crl_distribution_points = String.from_dict(x509_v3_extensions_dict.get('crl_distribution_points'))
x509_v3_extensions_.inhibit_any_policy = NonNegativeInteger.from_dict(x509_v3_extensions_dict.get('inhibit_any_policy'))
x509_v3_extensions_.private_key_usage_period = Validity.from_dict(x509_v3_extensions_dict.get('private_key_usage_period'))
x509_v3_extensions_.certificate_policies = String.from_dict(x509_v3_extensions_dict.get('certificate_policies'))
x509_v3_extensions_.policy_mappings = String.from_dict(x509_v3_extensions_dict.get('policy_mappings'))
return x509_v3_extensions_
@staticmethod
def from_obj(x509_v3_extensions_obj):
if not x509_v3_extensions_obj:
return None
x509_v3_extensions_ = X509V3Extensions()
x509_v3_extensions_.basic_constraints = String.from_obj(x509_v3_extensions_obj.get_Basic_Constraints())
x509_v3_extensions_.name_constraints = String.from_obj(x509_v3_extensions_obj.get_Name_Constraints())
x509_v3_extensions_.policy_constraints = String.from_obj(x509_v3_extensions_obj.get_Policy_Constraints())
x509_v3_extensions_.key_usage = String.from_obj(x509_v3_extensions_obj.get_Key_Usage())
x509_v3_extensions_.extended_key_usage = String.from_obj(x509_v3_extensions_obj.get_Extended_Key_Usage())
x509_v3_extensions_.subject_key_identifier = String.from_obj(x509_v3_extensions_obj.get_Subject_Key_Identifier())
x509_v3_extensions_.authority_key_identifier = String.from_obj(x509_v3_extensions_obj.get_Authority_Key_Identifier())
x509_v3_extensions_.subject_alternative_name = String.from_obj(x509_v3_extensions_obj.get_Subject_Alternative_Name())
x509_v3_extensions_.issuer_alternative_name = String.from_obj(x509_v3_extensions_obj.get_Issuer_Alternative_Name())
x509_v3_extensions_.subject_directory_attributes = String.from_obj(x509_v3_extensions_obj.get_Subject_Directory_Attributes())
x509_v3_extensions_.crl_distribution_points = String.from_obj(x509_v3_extensions_obj.get_CRL_Distribution_Points())
x509_v3_extensions_.inhibit_any_policy = NonNegativeInteger.from_obj(x509_v3_extensions_obj.get_Inhibit_Any_Policy())
x509_v3_extensions_.private_key_usage_period = Validity.from_obj(x509_v3_extensions_obj.get_Private_Key_Usage_Period())
x509_v3_extensions_.certificate_policies = String.from_obj(x509_v3_extensions_obj.get_Certificate_Policies())
x509_v3_extensions_.policy_mappings = String.from_obj(x509_v3_extensions_obj.get_Policy_Mappings())
return x509_v3_extensions_
class X509NonStandardExtensions(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(X509NonStandardExtensions, self).__init__()
self.netscape_comment = None
self.netscape_certificate_type = None
self.old_authority_key_identifier = None
self.old_primary_key_attributes = None
def to_obj(self):
x509_non_standard_extensions_obj = x509_certificate_binding.X509NonStandardExtensionsType()
if self.netscape_comment is not None : x509_non_standard_extensions_obj.set_Netscape_Comment(self.netscape_comment.to_obj())
if self.netscape_certificate_type is not None : x509_non_standard_extensions_obj.set_Netscape_Certificate_Type(self.netscape_certificate_type.to_obj())
if self.old_authority_key_identifier is not None : x509_non_standard_extensions_obj.set_Old_Authority_Key_Identifier(self.old_authority_key_identifier.to_obj())
if self.old_primary_key_attributes is not None : x509_non_standard_extensions_obj.set_Old_Primary_Key_Attributes(self.old_primary_key_attributes.to_obj())
return x509_non_standard_extensions_obj
def to_dict(self):
x509_non_standard_extensions_dict = {}
if self.netscape_comment is not None : x509_non_standard_extensions_dict['netscape_comment'] = self.netscape_comment.to_dict()
if self.netscape_certificate_type is not None : x509_non_standard_extensions_dict['netscape_certificate_type'] = self.netscape_certificate_type.to_dict()
if self.old_authority_key_identifier is not None : x509_non_standard_extensions_dict['old_authority_key_identifier'] = self.old_authority_key_identifier.to_dict()
if self.old_primary_key_attributes is not None : x509_non_standard_extensions_dict['old_primary_key_attributes'] = self.old_primary_key_attributes.to_dict()
return x509_non_standard_extensions_dict
@staticmethod
def from_dict(x509_non_standard_extensions_dict):
if not x509_non_standard_extensions_dict:
return None
x509_non_standard_extensions_ = X509NonStandardExtensions()
x509_non_standard_extensions_.netscape_comment = String.from_dict(x509_non_standard_extensions_dict.get('netscape_comment'))
x509_non_standard_extensions_.netscape_certificate_type = String.from_dict(x509_non_standard_extensions_dict.get('netscape_certificate_type'))
x509_non_standard_extensions_.old_authority_key_identifier = String.from_dict(x509_non_standard_extensions_dict.get('old_authority_key_identifier'))
x509_non_standard_extensions_.old_primary_key_attributes = String.from_dict(x509_non_standard_extensions_dict.get('old_primary_key_attributes'))
return x509_non_standard_extensions_
@staticmethod
def from_obj(x509_non_standard_extensions_obj):
if not x509_non_standard_extensions_obj:
return None
x509_non_standard_extensions_ = X509NonStandardExtensions()
x509_non_standard_extensions_.netscape_comment = String.from_obj(x509_non_standard_extensions_obj.get_Netscape_Comment())
x509_non_standard_extensions_.netscape_certificate_type = String.from_obj(x509_non_standard_extensions_obj.get_Netscape_Certificate_Type())
x509_non_standard_extensions_.old_authority_key_identifier = String.from_obj(x509_non_standard_extensions_obj.get_Old_Authority_Key_Identifier())
x509_non_standard_extensions_.old_primary_key_attributes = String.from_obj(x509_non_standard_extensions_obj.get_Old_Primary_Key_Attributes())
return x509_non_standard_extensions_
class X509CertificateSignature(cybox.Entity):
_namespace = 'http://cybox.mitre.org/objects#X509CertificateObject-2'
def __init__(self):
super(X509CertificateSignature, self).__init__()
self.signature_algorithm = None
self.signature = None
def to_obj(self):
x509_certificate_signature_obj = x509_certificate_binding.X509CertificateSignatureType()
if self.signature_algorithm is not None : x509_certificate_signature_obj.set_Signature_Algorithm(self.signature_algorithm.to_obj())
if self.signature is not None : x509_certificate_signature_obj.set_Signature(self.signature.to_obj())
return x509_certificate_signature_obj
def to_dict(self):
x509_certificate_signature_dict = {}
if self.signature_algorithm is not None : x509_certificate_signature_dict['signature_algorithm'] = self.signature_algorithm.to_dict()
if self.signature is not None : x509_certificate_signature_dict['signature'] = self.signature.to_dict()
return x509_certificate_signature_dict
@staticmethod
def from_dict(x509_certificate_signature_dict):
if not x509_certificate_signature_dict:
return None
x509_certificate_signature_ = X509CertificateSignature()
x509_certificate_signature_.signature_algorithm = String.from_dict(x509_certificate_signature_dict.get('signature_algorithm'))
x509_certificate_signature_.signature = String.from_dict(x509_certificate_signature_dict.get('signature'))
return x509_certificate_signature_
@staticmethod
def from_obj(x509_certificate_signature_obj):
if not x509_certificate_signature_obj:
return None
x509_certificate_signature_ = X509CertificateSignature()
x509_certificate_signature_.signature_algorithm = String.from_obj(x509_certificate_signature_obj.get_Signature_Algorithm())
x509_certificate_signature_.signature = String.from_obj(x509_certificate_signature_obj.get_Signature())
return x509_certificate_signature_
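# --- Hypothetical round-trip sketch (not part of the original module) ---
# A minimal illustration of the from_dict()/to_dict() symmetry used by the
# classes above. The key names follow the accessors defined in this file;
# the values are placeholders, not a real signature.
if __name__ == "__main__":
    example_signature = X509CertificateSignature.from_dict({
        'signature_algorithm': 'sha256WithRSAEncryption',
        'signature': '3045022100aa...',
    })
    print(example_signature.to_dict())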
| [
"[email protected]"
] | |
d971cd5adcf944f961e3a880ed247c1f0f3464c2 | 0ef98f8a60e4d30001c918dae6fa7ac6283abca9 | /61.py | 8fb34a02053700a90124fd61efbb6ce24271c8d1 | [] | no_license | samrithasudhagar/pro | a0169fc89c8c6d6189ac984ec3fab26e23269264 | c90cb60fefb74174f12db5ee80812c2374e4e3ce | refs/heads/master | 2020-04-22T11:00:47.588732 | 2019-06-20T09:43:28 | 2019-06-20T09:43:28 | 170,324,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | a=input()
c=input()
s=""
l=["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(len(a)):
k=l.index(a[i])
p=l.index(c[i])
    s=s+l[(k+p+1)%26]  # wrap around so the index stays inside the 26-letter list
print(s)
| [
"[email protected]"
] | |
869346c339d17559765444ba752e5b7d83edd7bd | 1d0da695f42cf0c20311c2c583f23e8f68e5acbc | /tensor2tensor/mesh_tensorflow/simd_mesh_impl.py | 3033e53d3cd9a466f9c396bd67dec0022c95fca4 | [
"Apache-2.0"
] | permissive | fbpatel/tensor2tensor | be5ed143bdad30ffa41274cf64268dcc45ad6b99 | 271da5289a10249f41fcebf3b7fbb50bbbd9041e | refs/heads/master | 2020-03-25T20:41:40.663350 | 2018-08-09T04:57:35 | 2018-08-09T04:58:07 | 144,141,419 | 1 | 0 | null | 2018-08-09T11:08:08 | 2018-08-09T11:08:08 | null | UTF-8 | Python | false | false | 11,524 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SIMD Mesh implementation (for TPU/XLA)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_utils
from tensor2tensor.mesh_tensorflow import tpu_variables
import tensorflow as tf
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.python.framework import ops
class SimdMeshImpl(mtf.MeshImpl):
"""Mesh implementation for TPU using SIMD and MPI operations."""
def __init__(self, shape, layout, devices, device_assignment):
super(SimdMeshImpl, self).__init__(shape, layout)
self._devices = devices
self._device_assignment = device_assignment
tf.logging.info("SimdMeshImpl init: {0} {1}".format(shape, layout))
self._pnum_tensor = None
@property
def pnum_tensor(self):
if self._pnum_tensor is not None:
return self._pnum_tensor
with mtf_utils.outside_all_rewrites():
tf.logging.info("Create pnum_tensor")
self._pnum_tensor = tpu_ops.tpu_replicated_input(
list(range(self.size)), name="pnum_constants")
return self._pnum_tensor
class LaidOutTensor(object):
"""One Slice."""
def __init__(self, tensor_list):
assert isinstance(tensor_list, list)
self._tensor_list = tensor_list
def __repr__(self):
return "[" + ",".join([str(t) for t in self._tensor_list]) + "]"
@property
def tensor_list(self):
return self._tensor_list
@property
def one_slice(self):
return self._tensor_list[0]
@classmethod
def from_tensor_list(cls, tensor_list):
return cls(tensor_list)
@property
def all_slices(self):
return self._tensor_list
@property
def slice_shape(self):
return self.one_slice.shape.as_list()
def to_laid_out_tensor(self):
return self
class LaidOutVariable(object):
"""Maintains slice-variables and copy operations."""
def __init__(self, variable, mesh_impl):
"""Create a LaidOutVariable.
Args:
variable: a Variable (Operation)
mesh_impl: a MeshImpl
"""
self._variable = variable
self._mesh_impl = mesh_impl
shape = variable.outputs[0].shape
dtype = variable.outputs[0].dtype
slice_shape = mesh_impl.slice_shape(shape)
base_name = variable.name
slices = []
for pnum in xrange(mesh_impl.size):
slice_var_name = base_name + "_slice_%d" % pnum
tpu_device = mesh_impl.device_assignment.tpu_device(replica=pnum)
# The initializer is unimportant, since the slice variables will be
# overwritten. zeros_initializer() is here to avoid the default
# initialization which adds lots of useless operations to the TF graph.
with ops.device(tpu_device):
slices.append(
tf.get_variable(
slice_var_name,
slice_shape,
dtype=dtype,
collections=[],
initializer=tf.zeros_initializer()))
self._laid_out_tensor = mesh_impl.LaidOutTensor(
[tpu_variables.ReplicatedVariable(base_name, slices)])
with tf.device("cpu:0"), mtf_utils.outside_all_rewrites():
self._copy_master_to_slices = self.assign_to_slices(
mesh_impl.make_slices(variable.master, shape),
assign_to_tensor_list=slices)
self._copy_slices_to_master = tf.assign(
variable.master,
mesh_impl.combine_slices(slices, shape, device="cpu:0"))
def assign_to_slices(self, slice_values, assign_to_tensor_list=None):
"""Assign to the slice variables.
Args:
slice_values: a list of tf.Tensor
assign_to_tensor_list: an optional list of tf.Variable
Returns:
a tf.operation
"""
if assign_to_tensor_list is None:
assign_to_tensor_list = self._laid_out_tensor.all_slices
# Handle both N -> 1 and N -> N cases.
num_slices = min(
len(assign_to_tensor_list), len(slice_values))
devices = [""] * num_slices
return tf.group(
mtf.parallel(devices, tf.assign, assign_to_tensor_list[:num_slices],
slice_values[:num_slices]))
@property
def laid_out_tensor(self):
return self._laid_out_tensor
@property
def copy_master_to_slices(self):
return self._copy_master_to_slices
@property
def copy_slices_to_master(self):
return self._copy_slices_to_master
def laid_out_pnum(self):
"""Returns a LaidOutTensor containing the processor number.
Returns:
a LaidOutTensor where each slice is an integer scalar
"""
return self.LaidOutTensor([self.pnum_tensor])
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented.
"""
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
partitioning = [
mtf.pnum_to_group(self.shape, mesh_axes, pnum)
for pnum in xrange(self.size)]
return self.LaidOutTensor(
[tpu_ops.cross_replica_sum(x.one_slice, partitioning)])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x
def allconcat(self, x, mesh_axis, concat_axis, stack=False):
"""Grouped allconcat (like MPI allgather followed by concat).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
stack: a boolean - whether to stack instead of concat
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
coord = self.laid_out_pcoord(mesh_axis)
t = x.one_slice
old_shape = t.shape.as_list()
num_parts = self.shape[mesh_axis].size
t = tf.expand_dims(t, concat_axis)
t *= tf.reshape(
tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),
[num_parts if i == concat_axis else 1
for i in xrange(len(old_shape) + 1)])
if not stack:
new_shape = old_shape[:]
new_shape[concat_axis] *= num_parts
t = tf.reshape(t, new_shape)
return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], "SUM")
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: a LaidOutTensor
      mesh_axis: an integer - the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
x = self.allconcat(x, mesh_axis, concat_axis)
x = self.allsplit(x, mesh_axis, split_axis)
return x
def slice(self, tf_tensor, tensor_shape):
""""Slice out the correspoding part of tensor given the pnum variable."""
tensor_layout = self.tensor_layout(tensor_shape)
if tensor_layout.is_fully_replicated:
return self.LaidOutTensor([tf_tensor])
else:
slice_shape = self.slice_shape(tensor_shape)
slice_begins = [
self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
]
slice_begins_tensor = tf.stack(slice_begins)
# slice on source device
selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
return self.LaidOutTensor(
[tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
def slicewise(self, fn, *inputs):
"""Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
"""
if fn == tf.add:
assert len(inputs) == 2
if isinstance(inputs[0], mtf.LazyAllreduceSum):
# sum of LazyAllreduceSum (keep delaying the allreduce)
return inputs[0] + inputs[1]
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
ret = fn(*[x.one_slice if isinstance(x, self.LaidOutTensor)
else x for x in inputs])
if isinstance(ret, tuple):
return tuple([self.LaidOutTensor([t]) for t in ret])
else:
return self.LaidOutTensor([ret])
@property
def device_assignment(self):
return self._device_assignment
@property
def devices(self):
return self._devices
def random(self, shape, tf_fn, kwargs):
"""Call a random tf operation (e.g. random_uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random_uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor
"""
# TODO(noam): can we make things better with stateless_random?
slice_shape = self.slice_shape(shape)
x = tf_fn(slice_shape, **kwargs)
# TPU does not have seeds enabled. Sync up the
# random choices by zeroing out all but the first core per group of
# identical slices, then allreducing by group.
layout = self.tensor_layout(shape)
# we need to sync across these axes.
mesh_axes = [i for i in xrange(self.ndims)
if i not in layout.tensor_axis_to_mesh_axis]
multiplier = 1.0
for axis in mesh_axes:
multiplier *= tf.cast(
tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)
x *= multiplier
x = self.LaidOutTensor([x])
x = self.allreduce(x, mesh_axes, "SUM")
return x
def export_to_tf_tensor(self, x, laid_out_x):
"""Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor
"""
tensor_layout = self.tensor_layout(x.shape)
if not tensor_layout.is_fully_replicated:
raise NotImplementedError(
"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated "
"Tensors. Try reshaping to new dimension names. "
" x.shape = %s tensor_layout=%s"
% (x.shape, tensor_layout))
return laid_out_x.one_slice
def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.slice(tf_x, x.shape)
@property
def supports_control_dependencies(self):
return False
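# --- Hypothetical construction sketch (not part of the original module) ---
# SimdMeshImpl is normally instantiated by higher-level TPU setup code; the
# helper below only illustrates the constructor arguments. The shape/layout
# strings follow mesh-tensorflow conventions, and `device_assignment` is
# assumed to come from the TPU device-assignment utilities.
def _example_build_mesh_impl(device_assignment):
  shape = mtf.convert_to_shape("rows:2;cols:2")
  layout_rules = mtf.convert_to_layout_rules("batch:rows;hidden:cols")
  return SimdMeshImpl(shape, layout_rules, None, device_assignment)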
| [
"[email protected]"
] | |
db970fba12cef3fd3dc931f70a998a1bb9b80ed5 | 5b52feaf975c810693bbd9c67deb061824cdca32 | /Darlington/phase-2/FILE 1/O/day 84 solution/qtn3.py | 13efc55791274ad1db6d3c1951ab660afa1f5ddb | [
"MIT"
] | permissive | darlcruz/python-challenge-solutions | 1dd21796b86f8fdcfa9a1a15faa26ab3e8e0f7b1 | 3e03a420d01177b71750d4d1b84cb3cbbf8c6900 | refs/heads/master | 2022-12-20T23:50:08.641120 | 2020-09-28T21:34:14 | 2020-09-28T21:34:14 | 263,591,779 | 0 | 0 | MIT | 2020-05-13T10:00:21 | 2020-05-13T10:00:20 | null | UTF-8 | Python | false | false | 372 | py | # program to create a file where all letters of English alphabet are listed by
# specified number of letters on each line.
import string
def letters_file_line(n):
with open("words1.txt", "w") as f:
alphabet = string.ascii_uppercase
letters = [alphabet[i:i + n] + "\n" for i in range(0, len(alphabet), n)]
f.writelines(letters)
letters_file_line(3) | [
"[email protected]"
] | |
69729b36949e741a9c7edf3832821394f61312c9 | 44f216cc3bb4771c8186349013ff0ed1abc98ea6 | /torch/distributed/_shard/sharded_tensor/_ops/math_ops.py | 6d3ed59da38cc711b1b67bbf8fd16e507d64083c | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | eiphy/pytorch | a8fc21a3c0552b392ed8c3a1d69f7ed8660c56ac | 104f0bf09ec7609d1c5626a7d7953ade4f8c9007 | refs/heads/master | 2022-05-23T02:10:13.158924 | 2022-05-07T21:26:00 | 2022-05-07T21:26:00 | 244,914,898 | 2 | 0 | NOASSERTION | 2020-03-04T14:00:53 | 2020-03-04T14:00:53 | null | UTF-8 | Python | false | false | 3,242 | py | import torch
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
sharded_op_impl
)
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard._utils import narrow_tensor
def binary_math_op_impl(op, types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the binary math ops
such as `torch.add`, `torch.mul`, `torch.div`, etc.
This method computes on ShardedTensor, or ShardedTensor op ReplicatedTensor
"""
if len(args) != 2:
raise ValueError("Only support binary math op on ShardedTensor for now!")
lhs = args[0]
rhs = args[1]
# Validate types
if isinstance(lhs, ReplicatedTensor):
assert isinstance(rhs, ShardedTensor)
st_size = rhs.size()
st_meta = rhs.local_shards()[0].metadata
if st_size != lhs.size():
# try to broadcast replicated tensor
lhs = lhs.expand(st_size)
replica_part = narrow_tensor(lhs, st_meta)
res = op(replica_part, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(rhs, ReplicatedTensor):
assert isinstance(lhs, ShardedTensor)
st_size = lhs.size()
st_meta = lhs.local_shards()[0].metadata
if st_size != rhs.size():
# try to broadcast replicated tensor
rhs = rhs.expand(st_size)
replica_part = narrow_tensor(rhs, st_meta)
res = op(lhs.local_tensor(), replica_part)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(lhs, (int, float)):
assert isinstance(rhs, ShardedTensor)
res = op(lhs, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(rhs, (int, float)):
assert isinstance(lhs, ShardedTensor)
res = op(lhs.local_tensor(), rhs)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg)
else:
raise RuntimeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported yet for ShardedTensor!")
def register_math_op(op):
@sharded_op_impl(op)
def binary_math_op(types, args=(), kwargs=None, pg=None):
return binary_math_op_impl(op, types, args, kwargs, pg)
binary_ops = [
# add
torch.add,
Tensor.add,
Tensor.__add__,
Tensor.__radd__,
# sub
torch.sub,
Tensor.sub,
Tensor.__sub__,
Tensor.__rsub__,
# mul
torch.mul,
Tensor.mul,
Tensor.__mul__,
Tensor.__rmul__,
# div
torch.div,
Tensor.div,
Tensor.__div__,
Tensor.__rdiv__,
]
for op in binary_ops:
register_math_op(op)
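# --- Hypothetical usage sketch (not part of the original module) ---
# With the registrations above in place, calling the listed ops on a
# ShardedTensor is routed through binary_math_op_impl(). The helper below is
# illustrative only; it assumes `st` is a ShardedTensor created elsewhere
# inside an initialized process group.
def _example_scalar_math(st):
    doubled = torch.mul(st, 2.0)  # handled by the "rhs is int/float" branch
    shifted = torch.add(st, 1.0)  # same branch, reached via torch.add
    return doubled, shifted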
| [
"[email protected]"
] | |
c00894ef0c8a747b7a9d05f73efe554aa75785e6 | 5111b0c881c8d86705f2b237e14024396e34091a | /task_check_list/models/task_check_list.py | 88812aad7bc1b21e05fae5f1a896ebb05a2373dd | [] | no_license | odoomates/odooapps | a22fa15346694563733008c42549ebc0da7fc9f6 | 68061b6fa79818d17727ef620e28fff44b48df72 | refs/heads/16.0 | 2023-08-11T15:25:28.508718 | 2023-08-10T17:58:45 | 2023-08-10T17:58:45 | 173,598,986 | 182 | 306 | null | 2023-08-10T17:58:46 | 2019-03-03T16:20:23 | Python | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class ProjectTask(models.Model):
_inherit = 'project.task'
@api.depends('task_checklist')
def checklist_progress(self):
total_len = self.env['task.checklist'].search_count([])
for rec in self:
if total_len != 0:
check_list_len = len(rec.task_checklist)
rec.checklist_progress = (check_list_len * 100) / total_len
else:
rec.checklist_progress = 0
task_checklist = fields.Many2many('task.checklist', string='Check List')
checklist_progress = fields.Float(compute=checklist_progress, string='Progress', store=True,
default=0.0)
max_rate = fields.Integer(string='Maximum rate', default=100)
class TaskChecklist(models.Model):
_name = 'task.checklist'
_description = 'Checklist for the task'
name = fields.Char(string='Name', required=True)
description = fields.Char(string='Description')
| [
"[email protected]"
] | |
1c68bb6b8863c584a3f0728adcaa19a31159f831 | 754d26af3d5fa0900d1dbfc934f3b9f0970e2a47 | /unchained/community/announcement/views.py | da9977b7d41c7f38d2fd2e479fe5ec88fbdaba8e | [] | no_license | live-wire/community | 8a7bfdb4e2d6562d12be334ba0e655ffe041bb5f | 7b2efa7b78465134138ee08fc557f4fedf678394 | refs/heads/master | 2021-07-11T10:26:31.535653 | 2020-03-20T02:37:30 | 2020-03-20T02:37:30 | 140,683,308 | 3 | 2 | null | 2020-06-06T12:03:38 | 2018-07-12T08:17:50 | JavaScript | UTF-8 | Python | false | false | 1,121 | py | from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from rest_framework.response import Response
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import AnnouncementSerializer
from .models import Announcement
class AnnouncementViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
"""
queryset = Announcement.objects.all()
serializer_class = AnnouncementSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
authentication_classes = (CsrfExemptSessionAuthentication, )
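# --- Hypothetical URL wiring (not part of the original file) ---
# A viewset like the one above is normally registered with a router in the
# project's urls.py; the snippet below only illustrates that registration,
# and the 'announcements' prefix is an assumption.
from rest_framework.routers import DefaultRouter
example_router = DefaultRouter()
example_router.register(r'announcements', AnnouncementViewSet)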
| [
"[email protected]"
] | |
639352824e62fb004ba1510bda6aad1babb20041 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/custom_columns_regress/__init__.py | b7533f1e4ee752db1b64d6e4502cd93c5563b205 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/custom_columns_regress/__init__.py | [
"[email protected]"
] | |
50a4aa9cfcccc1e4f762802aeab4c1d0c615195b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04019/s609202447.py | 9cd2c625f2777b825900b773714a1f4e6fb7849b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | S=input()
n=0
s=0
w=0
e=0
for i in range(len(S)):
if S[i]=="N":
n+=1
if S[i]=="S":
s+=1
if S[i]=="W":
w+=1
if S[i]=="E":
e+=1
if n!=0 and s!=0 and w!=0 and e!=0 or n==0 and s==0 and w!=0 and e!=0 or n!=0 and s!=0 and w==0 and e==0:
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
96df2462240b880242a521c7a6728ce366df98c0 | 553b34a101c54090e68f540d96369ac7d5774d95 | /python/python_koans/python2/koans/about_list_assignments.py | aa05dc5b8a69e50728be9798b3455af462f5c023 | [
"MIT"
] | permissive | topliceanu/learn | fd124e1885b5c0bfea8587510b5eab79da629099 | 1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3 | refs/heads/master | 2022-07-16T19:50:40.939933 | 2022-06-12T15:40:20 | 2022-06-12T15:40:20 | 21,684,180 | 26 | 12 | MIT | 2020-03-26T20:51:35 | 2014-07-10T07:22:17 | JavaScript | UTF-8 | Python | false | false | 947 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrayAssignments in the Ruby Koans
#
from runner.koan import *
class AboutListAssignments(Koan):
def test_non_parallel_assignment(self):
names = ["John", "Smith"]
self.assertEqual(['John', 'Smith'], names)
def test_parallel_assignments(self):
first_name, last_name = ["John", "Smith"]
self.assertEqual('John', first_name)
self.assertEqual('Smith', last_name)
def test_parallel_assignments_with_sublists(self):
first_name, last_name = [["Willie", "Rae"], "Johnson"]
self.assertEqual(['Willie', 'Rae'], first_name)
self.assertEqual('Johnson', last_name)
def test_swapping_with_parallel_assignment(self):
first_name = "Roy"
last_name = "Rob"
first_name, last_name = last_name, first_name
self.assertEqual('Rob', first_name)
self.assertEqual('Roy', last_name)
| [
"[email protected]"
] | |
b883d4613806a95ab753103d22f2fcd096a20b3f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/others/Pix2Pix/util/__init__.py | 4de848e30b5a486d7214bec80276e35a3a4d0d04 | [
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 667 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package includes a miscellaneous collection of useful helper functions."""
| [
"[email protected]"
] | |
9c439dd90703b73edc146cfbc628fab4e7984a37 | cfa08425d0a457e0c673543b6e16f0a02effe05f | /projects/admm_4bus/data/sixbus/plot.py | 86a8e56d3602f498a28cb6b6409a2bb9500fe2cf | [] | no_license | missinglpf/Distributed_optimization | 5b3dfea8b2a29225761537531322e421be83d7a8 | 84040eebd3f04acf4c09e5e4ff2e59e752bf3fae | refs/heads/master | 2020-08-01T03:42:36.455932 | 2018-06-25T15:59:44 | 2018-06-25T15:59:44 | 210,850,421 | 1 | 0 | null | 2019-09-25T13:20:11 | 2019-09-25T13:20:10 | null | UTF-8 | Python | false | false | 4,376 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import datetime
# df0 = pd.read_csv('lamda300/data_A_0.csv')
# df1 = pd.read_csv('lamda300/data_A_0.csv')
# df0 = pd.read_csv('lamda200/data_A_0.csv')
# df3 = pd.read_csv('lamda100/data_A_0.csv')
df0 = pd.read_csv('lamda50/data_A_0.csv')
df1 = pd.read_csv('lamda50/data_A_1.csv')
df2 = pd.read_csv('lamda50/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label="lamda=50", linewidth=0.5)
df0 = pd.read_csv('lamda30/data_A_0.csv')
df1 = pd.read_csv('lamda30/data_A_1.csv')
df2 = pd.read_csv('lamda30/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=30", linewidth=0.5)
df0 = pd.read_csv('lamda25/data_A_0.csv')
df1 = pd.read_csv('lamda25/data_A_1.csv')
df2 = pd.read_csv('lamda25/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=25", linewidth=0.5)
df0 = pd.read_csv('lamda20/data_A_0.csv')
df1 = pd.read_csv('lamda20/data_A_1.csv')
df2 = pd.read_csv('lamda20/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=20", linewidth=0.5)
df0 = pd.read_csv('lamda15/data_A_0.csv')
df1 = pd.read_csv('lamda15/data_A_1.csv')
df2 = pd.read_csv('lamda15/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=15", linewidth=0.5)
df0 = pd.read_csv('lamda15/data_A_0.csv')
df1 = pd.read_csv('lamda15/data_A_1.csv')
df2 = pd.read_csv('lamda15/data_A_2.csv')
p_total = [df0['Q'][502]+df1['Q'][502]+df2['Q'][502] - 2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "Optimal value", linewidth=2, linestyle='--')
# df0 = pd.read_csv('lamda10/data_A_0.csv')
# df1 = pd.read_csv('lamda10/data_A_1.csv')
# df2 = pd.read_csv('lamda10/data_A_2.csv')
#
# p_total = df0['P'][2:502]+df1['P'][2:502]+df2['P'][2:502] - [2.1]*500
# plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=10", linewidth=0.5)
# df5 = pd.read_csv('lamda25/data_A_2.csv')
# df6 = pd.read_csv('lamda15/data_A_2.csv')
# df7 = pd.read_csv('lamda20/data_A_2.csv')
# df8 = pd.read_csv('lamda30/data_A_2.csv')
# df2 = pd.read_csv('lamda300/data_A_2.csv')
# df3 = pd.read_csv('lamda300/data_A_3.csv')
# df4 = pd.read_csv('lamda300/data_A_4.csv')
# df5 = pd.read_csv('lamda300/data_A_5.csv')
# df['Time'] = df['Time'].map(lambda x: datetime.strptime(str(x), '%Y/%m/%d %H:%M:%S.%f'))
# plt.plot(df0['ADMM_IT'][1:6000], df0['P'][1:6000], label = "lamda=200")
# plt.plot(df0['ADMM_IT'][1:6000], df1['P'][1:6000], label = "lamda=300")
# plt.plot(df4['ADMM_IT'][1:3000], df3['Q'][1:3000], label = "lamda=100")
# central = [df5['P'][1000]]*1001
# plt.plot(df4['ADMM_IT'][1:500], df4['P'][1:500], label = "lamda=50", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df8['P'][1:500], label = "lamda=30", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df5['P'][1:500], label = "lamda=25", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df7['P'][1:500], label = "lamda=20", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df6['P'][1:500], label = "lamda=15", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], central[1:500], label = "Optimal value", linewidth=2, linestyle='--')
# central = [df5['Q'][1000]]*1001
# plt.plot(df4['ADMM_IT'][1:500], df4['Q'][1:500], label = "lamda=50", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df8['Q'][1:500], label = "lamda=30", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df5['Q'][1:500], label = "lamda=25", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df7['Q'][1:500], label = "lamda=20", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df6['Q'][1:500], label = "lamda=15", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], central[1:500], label = "Optimal value", linewidth=2, linestyle='--')
# plt.plot(df1['Time'][1:15000], df1['X_real'][1:15000])
# plt.plot(df2['Time'][1:15000], df2['X_real'][1:15000])
plt.legend()
plt.xlabel("Number of iterations")
plt.ylabel("Reactive power loss(pu)")
# plt.ylabel("Reactive power(pu)")
plt.show()
| [
"[email protected]"
] | |
dcb4426c63d6078fb4f8014c3c418f8b6a9dbfa6 | 4b0a0793238d31413b95b71b3af8fc37ac883427 | /tools/crash_server.py | e10273e43c04f2656507d2911736c2f74ddc52b0 | [
"BSD-3-Clause"
] | permissive | sarthak-saxena/cef_node_webkit | a408c8b4b620923e5546dab5d05c7431de7e81f4 | cca786066cdc635d2bcfb67315a70a1c40c5d77a | refs/heads/master | 2023-05-04T22:09:59.270934 | 2021-05-16T14:19:43 | 2021-05-16T14:19:43 | 367,562,455 | 1 | 0 | NOASSERTION | 2021-05-15T07:04:05 | 2021-05-15T06:58:07 | C++ | UTF-8 | Python | false | false | 11,992 | py | #!/usr/bin/env python
# Copyright 2017 The Chromium Embedded Framework Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file.
"""
This script implements a simple HTTP server for receiving crash report uploads
from a Breakpad/Crashpad client (any CEF-based application). This script is
intended for testing purposes only. An HTTPS server and a system such as Socorro
(https://wiki.mozilla.org/Socorro) should be used when uploading crash reports
from production applications.
Usage of this script is as follows:
1. Run this script from the command-line. The first argument is the server port
number and the second argument is the directory where uploaded report
information will be saved:
> python crash_server.py 8080 /path/to/dumps
2. Create a "crash_reporter.cfg" file at the required platform-specific
location. On Windows and Linux this file must be placed next to the main
application executable. On macOS this file must be placed in the top-level
app bundle Resources directory (e.g. "<appname>.app/Contents/Resources"). At
a minimum it must contain a "ServerURL=http://localhost:8080" line under the
"[Config]" section (make sure the port number matches the value specified in
step 1). See comments in include/cef_crash_util.h for a complete
specification of this file.
Example file contents:
[Config]
ServerURL=http://localhost:8080
# Disable rate limiting so that all crashes are uploaded.
RateLimitEnabled=false
MaxUploadsPerDay=0
[CrashKeys]
# The cefclient sample application sets these values (see step 5 below).
testkey_small1=small
testkey_small2=small
testkey_medium1=medium
testkey_medium2=medium
testkey_large1=large
testkey_large2=large
3. Load one of the following URLs in the CEF-based application to cause a crash:
Main (browser) process crash: chrome://inducebrowsercrashforrealz
Renderer process crash: chrome://crash
GPU process crash: chrome://gpucrash
4. When this script successfully receives a crash report upload you will see
console output like the following:
01/10/2017 12:31:23: Dump <id>
The "<id>" value is a 16 digit hexadecimal string that uniquely identifies
the dump. Crash dumps and metadata (product state, command-line flags, crash
keys, etc.) will be written to the "<id>.dmp" and "<id>.json" files
underneath the directory specified in step 1.
On Linux Breakpad uses the wget utility to upload crash dumps, so make sure
that utility is installed. If the crash is handled correctly then you should
see console output like the following when the client uploads a crash dump:
--2017-01-10 12:31:22-- http://localhost:8080/
Resolving localhost (localhost)... 127.0.0.1
Connecting to localhost (localhost)|127.0.0.1|:8080... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [text/html]
Saving to: '/dev/fd/3'
Crash dump id: <id>
On macOS when uploading a crash report to this script over HTTP you may
receive an error like the following:
"Transport security has blocked a cleartext HTTP (http://) resource load
since it is insecure. Temporary exceptions can be configured via your app's
Info.plist file."
You can work around this error by adding the following key to the Helper app
Info.plist file (e.g. "<appname>.app/Contents/Frameworks/
<appname> Helper.app/Contents/Info.plist"):
<key>NSAppTransportSecurity</key>
<dict>
<!--Allow all connections (for testing only!)-->
<key>NSAllowsArbitraryLoads</key>
<true/>
</dict>
5. The cefclient sample application sets test crash key values in the browser
and renderer processes. To work properly these values must also be defined
in the "[CrashKeys]" section of "crash_reporter.cfg" as shown above.
In tests/cefclient/browser/client_browser.cc (browser process):
CefSetCrashKeyValue("testkey1", "value1_browser");
CefSetCrashKeyValue("testkey2", "value2_browser");
CefSetCrashKeyValue("testkey3", "value3_browser");
In tests/cefclient/renderer/client_renderer.cc (renderer process):
CefSetCrashKeyValue("testkey1", "value1_renderer");
CefSetCrashKeyValue("testkey2", "value2_renderer");
CefSetCrashKeyValue("testkey3", "value3_renderer");
When crashing the browser or renderer processes with cefclient you should
verify that the test crash key values are included in the metadata
("<id>.json") file. Some values may be chunked as described in
include/cef_crash_util.h.
"""
from __future__ import absolute_import
from __future__ import print_function
import cgi
import datetime
import json
import os
import shutil
import sys
import uuid
import zlib
is_python2 = sys.version_info.major == 2
if is_python2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from cStringIO import StringIO as BytesIO
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO, open
def print_msg(msg):
""" Write |msg| to stdout and flush. """
timestr = datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")
sys.stdout.write("%s: %s\n" % (timestr, msg))
sys.stdout.flush()
# Key identifying the minidump file.
minidump_key = 'upload_file_minidump'
class CrashHTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, dump_directory, *args):
self._dump_directory = dump_directory
BaseHTTPRequestHandler.__init__(self, *args)
def _send_default_response_headers(self):
""" Send default response headers. """
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def _parse_post_data(self, data):
""" Returns a cgi.FieldStorage object for this request or None if this is
not a POST request. """
if self.command != 'POST':
return None
return cgi.FieldStorage(
fp=BytesIO(data),
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
def _get_chunk_size(self):
# Read to the next "\r\n".
size_str = self.rfile.read(2)
while size_str[-2:] != b"\r\n":
size_str += self.rfile.read(1)
# Remove the trailing "\r\n".
size_str = size_str[:-2]
assert len(size_str) <= 4
return int(size_str, 16)
def _get_chunk_data(self, chunk_size):
data = self.rfile.read(chunk_size)
assert len(data) == chunk_size
# Skip the trailing "\r\n".
self.rfile.read(2)
return data
def _unchunk_request(self, compressed):
""" Read a chunked request body. Optionally decompress the result. """
if compressed:
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
# Chunked format is: <size>\r\n<bytes>\r\n<size>\r\n<bytes>\r\n0\r\n
unchunked = b""
while True:
chunk_size = self._get_chunk_size()
print('Chunk size 0x%x' % chunk_size)
if (chunk_size == 0):
break
chunk_data = self._get_chunk_data(chunk_size)
if compressed:
unchunked += d.decompress(chunk_data)
else:
unchunked += chunk_data
if compressed:
unchunked += d.flush()
return unchunked
def _create_new_dump_id(self):
""" Breakpad requires a 16 digit hexadecimal dump ID. """
return uuid.uuid4().hex.upper()[0:16]
def do_GET(self):
""" Default empty implementation for handling GET requests. """
self._send_default_response_headers()
self.wfile.write("<html><body><h1>GET!</h1></body></html>")
def do_HEAD(self):
""" Default empty implementation for handling HEAD requests. """
self._send_default_response_headers()
def do_POST(self):
""" Handle a multi-part POST request submitted by Breakpad/Crashpad. """
self._send_default_response_headers()
# Create a unique ID for the dump.
dump_id = self._create_new_dump_id()
# Return the unique ID to the caller.
self.wfile.write(dump_id.encode('utf-8'))
dmp_stream = None
metadata = {}
# Request body may be chunked and/or gzip compressed. For example:
#
# 3029 branch on Windows:
# User-Agent: Crashpad/0.8.0
# Host: localhost:8080
# Connection: Keep-Alive
# Transfer-Encoding: chunked
# Content-Type: multipart/form-data; boundary=---MultipartBoundary-vp5j9HdSRYK8DvX2DhtpqEbMNjSN1wnL---
# Content-Encoding: gzip
#
# 2987 branch on Windows:
# User-Agent: Crashpad/0.8.0
# Host: localhost:8080
# Connection: Keep-Alive
# Content-Type: multipart/form-data; boundary=---MultipartBoundary-qFhorGA40vDJ1fgmc2mjorL0fRfKOqup---
# Content-Length: 609894
#
# 2883 branch on Linux:
# User-Agent: Wget/1.15 (linux-gnu)
# Host: localhost:8080
# Accept: */*
# Connection: Keep-Alive
# Content-Type: multipart/form-data; boundary=--------------------------83572861f14cc736
# Content-Length: 32237
# Content-Encoding: gzip
print(self.headers)
    chunked = ('Transfer-Encoding' in self.headers and
               self.headers['Transfer-Encoding'].lower() == 'chunked')
    compressed = ('Content-Encoding' in self.headers and
                  self.headers['Content-Encoding'].lower() == 'gzip')
if chunked:
request_body = self._unchunk_request(compressed)
else:
content_length = int(self.headers[
'Content-Length']) if 'Content-Length' in self.headers else 0
if content_length > 0:
request_body = self.rfile.read(content_length)
else:
request_body = self.rfile.read()
if compressed:
request_body = zlib.decompress(request_body, 16 + zlib.MAX_WBITS)
# Parse the multi-part request.
form_data = self._parse_post_data(request_body)
for key in form_data.keys():
if key == minidump_key and form_data[minidump_key].file:
dmp_stream = form_data[minidump_key].file
else:
metadata[key] = form_data[key].value
if dmp_stream is None:
# Exit early if the request is invalid.
print_msg('Invalid dump %s' % dump_id)
return
print_msg('Dump %s' % dump_id)
# Write the minidump to file.
dump_file = os.path.join(self._dump_directory, dump_id + '.dmp')
with open(dump_file, 'wb') as fp:
shutil.copyfileobj(dmp_stream, fp)
# Write the metadata to file.
meta_file = os.path.join(self._dump_directory, dump_id + '.json')
if is_python2:
with open(meta_file, 'w') as fp:
json.dump(
metadata,
fp,
ensure_ascii=False,
encoding='utf-8',
indent=2,
sort_keys=True)
else:
with open(meta_file, 'w', encoding='utf-8') as fp:
json.dump(metadata, fp, indent=2, sort_keys=True)
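# ---------------------------------------------------------------------------
# Illustrative client sketch (not part of the original tool): it builds the
# kind of multipart/form-data body documented in do_POST() above and submits
# it to a locally running instance of this server. The metadata keys below are
# arbitrary examples; only the minidump_key field is required by the server.
# ---------------------------------------------------------------------------
def PostExampleCrash(port, minidump_path, metadata=None):
  """ Submit |minidump_path| plus |metadata| to localhost:|port| and return
  the dump ID reported by the server. """
  if is_python2:
    from httplib import HTTPConnection
  else:
    from http.client import HTTPConnection
  metadata = metadata or {'prod': 'ExampleApp', 'ver': '1.0.0'}
  boundary = uuid.uuid4().hex
  with open(minidump_path, 'rb') as fp:
    dump_bytes = fp.read()
  body = b''
  for key, value in metadata.items():
    body += ('--%s\r\n'
             'Content-Disposition: form-data; name="%s"\r\n\r\n'
             '%s\r\n' % (boundary, key, value)).encode('utf-8')
  body += ('--%s\r\n'
           'Content-Disposition: form-data; name="%s"; filename="dump.dmp"\r\n'
           'Content-Type: application/octet-stream\r\n\r\n' %
           (boundary, minidump_key)).encode('utf-8')
  body += dump_bytes + ('\r\n--%s--\r\n' % boundary).encode('utf-8')
  conn = HTTPConnection('localhost', port)
  conn.request('POST', '/', body, {
      'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
  })
  return conn.getresponse().read()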
def HandleRequestsUsing(dump_directory):
  return lambda *args: CrashHTTPRequestHandler(dump_directory, *args)
def RunCrashServer(port, dump_directory):
""" Run the crash handler HTTP server. """
httpd = HTTPServer(('', port), HandleRequestsUsing(dump_directory))
print_msg('Starting httpd on port %d' % port)
httpd.serve_forever()
# Program entry point.
if __name__ == "__main__":
if len(sys.argv) != 3:
print('Usage: %s <port> <dump_directory>' % os.path.basename(sys.argv[0]))
sys.exit(1)
# Create the dump directory if necessary.
dump_directory = sys.argv[2]
if not os.path.exists(dump_directory):
os.makedirs(dump_directory)
if not os.path.isdir(dump_directory):
raise Exception('Directory does not exist: %s' % dump_directory)
RunCrashServer(int(sys.argv[1]), dump_directory)
| [
"[email protected]"
] | |
61f0d940e0be707a4e8b350b845a65d6547f2c95 | 1c4a406177d7e0ae58b284fd999a21034ef7ea5a | /slack/version.py | c23ca1c4cac964caa33a86a4b6f8538659f2d17f | [
"MIT"
] | permissive | taras-y/python-slackclient | b374acc70f8d06a1c3675d7f3139064c8e243a00 | 3706b0811c3fa569f039eb9d7158f4bf8c8f0223 | refs/heads/master | 2021-07-19T12:20:23.008646 | 2020-09-19T11:23:58 | 2020-09-19T11:23:58 | 215,239,132 | 0 | 0 | MIT | 2020-09-19T11:23:59 | 2019-10-15T07:50:52 | null | UTF-8 | Python | false | false | 100 | py | # see: http://legacy.python.org/dev/peps/pep-0440/#public-version-identifiers
__version__ = "2.9.0"
| [
"[email protected]"
] | |
dff7bbe98971ffdb3c3cbf46dcd27dd587faf52e | 75f45278006fe833aab248c00a05fb099d5de96b | /20201129-pyconchina/typeschema_parser.py | fddb1a0516bca51c609914c26662e08ea5fc1cbe | [
"MIT"
] | permissive | thautwarm/Slides | 851acbed5e3f39eab1ab624a4bdda82c3f8127fe | f88832c1dcc38b9a7afdbb2986515c9a58d2b077 | refs/heads/master | 2021-07-03T13:59:51.895168 | 2020-12-02T12:33:18 | 2020-12-02T12:33:18 | 203,609,797 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,907 | py |
"""
Copyright thautwarm (c) 2019
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of thautwarm nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typeschema import *
from typing import Generic, TypeVar
T = TypeVar('T')
class Tokens():
__slots__ = ['array', 'offset']
def __init__(self, array):
self.array = array
self.offset = 0
class State():
def __init__(self):
pass
class AST(Generic[T]):
__slots__ = ['tag', 'contents']
def __init__(self, tag: str, contents: T):
self.tag = tag
self.contents = contents
class Nil():
nil = None
__slots__ = []
def __init__(self):
if (Nil.nil is None):
Nil.nil = self
return
raise ValueError('Nil cannot get instantiated twice.')
def __len__(self):
return 0
def __getitem__(self, n):
raise IndexError('Out of bounds')
@property
def head(self):
raise IndexError('Out of bounds')
@property
def tail(self):
raise IndexError('Out of bounds')
def __repr__(self):
return '[]'
_nil = Nil()
class Cons():
__slots__ = ['head', 'tail']
def __init__(self, _head, _tail):
self.head = _head
self.tail = _tail
def __len__(self):
nil = _nil
l = 0
while (self is not nil):
l += 1
self = self.tail
return l
def __iter__(self):
nil = _nil
while (self is not nil):
(yield self.head)
self = self.tail
def __getitem__(self, n):
while (n != 0):
self = self.tail
n -= 1
return self.head
def __repr__(self):
return repr(list(self))
try:
def mk_pretty():
from prettyprinter import register_pretty, pretty_call, pprint
@register_pretty(Tokens)
def pretty_tokens(value, ctx):
return pretty_call(ctx, Tokens, offset=value.offset, array=value.array)
@register_pretty(AST)
def pretty_ast(value, ctx):
return pretty_call(ctx, AST, tag=value.tag, contents=value.contents)
mk_pretty()
del mk_pretty
except ImportError:
pass
del T, Generic, TypeVar
builtin_cons = Cons
builtin_nil = _nil
builtin_mk_ast = AST
def mk_parser():
pass
def rbnf_named_lr_step_rbnfmacro_0(rbnf_tmp_0, builtin_state, builtin_tokens):
lcl_0 = rbnf_named_parse_classdef(builtin_state, builtin_tokens)
rbnf_named__check_1 = lcl_0
lcl_0 = rbnf_named__check_1[0]
lcl_0 = (lcl_0 == False)
if lcl_0:
lcl_0 = rbnf_named__check_1
else:
lcl_1 = rbnf_named__check_1[1]
rbnf_tmp_1 = lcl_1
lcl_1 = rbnf_tmp_0.append
lcl_1 = lcl_1(rbnf_tmp_1)
rbnf_tmp_1_ = rbnf_tmp_0
lcl_2 = (True, rbnf_tmp_1_)
lcl_0 = lcl_2
return lcl_0
def rbnf_named_lr_loop_rbnfmacro_0(rbnf_tmp_0, builtin_state, builtin_tokens):
rbnf_named_lr_rbnfmacro_0_reduce = rbnf_tmp_0
lcl_0 = builtin_tokens.offset
rbnf_named__off_0 = lcl_0
lcl_0 = rbnf_named_lr_step_rbnfmacro_0(rbnf_named_lr_rbnfmacro_0_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_0_try = lcl_0
lcl_0 = rbnf_named_lr_rbnfmacro_0_try[0]
lcl_0 = (lcl_0 is not False)
while lcl_0:
lcl_1 = builtin_tokens.offset
rbnf_named__off_0 = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_0_try[1]
rbnf_named_lr_rbnfmacro_0_reduce = lcl_1
lcl_1 = rbnf_named_lr_step_rbnfmacro_0(rbnf_named_lr_rbnfmacro_0_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_0_try = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_0_try[0]
lcl_1 = (lcl_1 is not False)
lcl_0 = lcl_1
lcl_0 = builtin_tokens.offset
lcl_0 = (lcl_0 == rbnf_named__off_0)
if lcl_0:
lcl_1 = (True, rbnf_named_lr_rbnfmacro_0_reduce)
lcl_0 = lcl_1
else:
lcl_0 = rbnf_named_lr_rbnfmacro_0_try
return lcl_0
def rbnf_named_lr_step_rbnfmacro_1(rbnf_tmp_0, builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 6):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_1 = lcl_0
lcl_0 = (rbnf_tmp_1 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'quote , not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
lcl_1 = rbnf_named_parse_fieldef(builtin_state, builtin_tokens)
rbnf_named__check_2 = lcl_1
lcl_1 = rbnf_named__check_2[0]
lcl_1 = (lcl_1 == False)
if lcl_1:
lcl_1 = rbnf_named__check_2
else:
lcl_2 = rbnf_named__check_2[1]
rbnf_tmp_2 = lcl_2
lcl_2 = rbnf_tmp_0.append
lcl_2 = lcl_2(rbnf_tmp_2)
rbnf_tmp_1_ = rbnf_tmp_0
lcl_3 = (True, rbnf_tmp_1_)
lcl_1 = lcl_3
lcl_0 = lcl_1
return lcl_0
def rbnf_named_lr_loop_rbnfmacro_1(rbnf_tmp_0, builtin_state, builtin_tokens):
rbnf_named_lr_rbnfmacro_1_reduce = rbnf_tmp_0
lcl_0 = builtin_tokens.offset
rbnf_named__off_0 = lcl_0
lcl_0 = rbnf_named_lr_step_rbnfmacro_1(rbnf_named_lr_rbnfmacro_1_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_1_try = lcl_0
lcl_0 = rbnf_named_lr_rbnfmacro_1_try[0]
lcl_0 = (lcl_0 is not False)
while lcl_0:
lcl_1 = builtin_tokens.offset
rbnf_named__off_0 = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_1_try[1]
rbnf_named_lr_rbnfmacro_1_reduce = lcl_1
lcl_1 = rbnf_named_lr_step_rbnfmacro_1(rbnf_named_lr_rbnfmacro_1_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_1_try = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_1_try[0]
lcl_1 = (lcl_1 is not False)
lcl_0 = lcl_1
lcl_0 = builtin_tokens.offset
lcl_0 = (lcl_0 == rbnf_named__off_0)
if lcl_0:
lcl_1 = (True, rbnf_named_lr_rbnfmacro_1_reduce)
lcl_0 = lcl_1
else:
lcl_0 = rbnf_named_lr_rbnfmacro_1_try
return lcl_0
def rbnf_named_lr_step_rbnfmacro_2(rbnf_tmp_0, builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 6):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_1 = lcl_0
lcl_0 = (rbnf_tmp_1 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'quote , not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
lcl_1 = rbnf_named_parse_type(builtin_state, builtin_tokens)
rbnf_named__check_2 = lcl_1
lcl_1 = rbnf_named__check_2[0]
lcl_1 = (lcl_1 == False)
if lcl_1:
lcl_1 = rbnf_named__check_2
else:
lcl_2 = rbnf_named__check_2[1]
rbnf_tmp_2 = lcl_2
lcl_2 = rbnf_tmp_0.append
lcl_2 = lcl_2(rbnf_tmp_2)
rbnf_tmp_1_ = rbnf_tmp_0
lcl_3 = (True, rbnf_tmp_1_)
lcl_1 = lcl_3
lcl_0 = lcl_1
return lcl_0
def rbnf_named_lr_loop_rbnfmacro_2(rbnf_tmp_0, builtin_state, builtin_tokens):
rbnf_named_lr_rbnfmacro_2_reduce = rbnf_tmp_0
lcl_0 = builtin_tokens.offset
rbnf_named__off_0 = lcl_0
lcl_0 = rbnf_named_lr_step_rbnfmacro_2(rbnf_named_lr_rbnfmacro_2_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_2_try = lcl_0
lcl_0 = rbnf_named_lr_rbnfmacro_2_try[0]
lcl_0 = (lcl_0 is not False)
while lcl_0:
lcl_1 = builtin_tokens.offset
rbnf_named__off_0 = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_2_try[1]
rbnf_named_lr_rbnfmacro_2_reduce = lcl_1
lcl_1 = rbnf_named_lr_step_rbnfmacro_2(rbnf_named_lr_rbnfmacro_2_reduce, builtin_state, builtin_tokens)
rbnf_named_lr_rbnfmacro_2_try = lcl_1
lcl_1 = rbnf_named_lr_rbnfmacro_2_try[0]
lcl_1 = (lcl_1 is not False)
lcl_0 = lcl_1
lcl_0 = builtin_tokens.offset
lcl_0 = (lcl_0 == rbnf_named__off_0)
if lcl_0:
lcl_1 = (True, rbnf_named_lr_rbnfmacro_2_reduce)
lcl_0 = lcl_1
else:
lcl_0 = rbnf_named_lr_rbnfmacro_2_try
return lcl_0
def rbnf_named_parse_START(builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 0):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_0 = lcl_0
lcl_0 = (rbnf_tmp_0 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'BOF not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 1):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_1 = _rbnf_cur_token
rbnf_tmp_1 = lcl_1
lcl_1 = (rbnf_tmp_1 is None)
if lcl_1:
lcl_2 = builtin_tokens.offset
lcl_2 = (lcl_2, 'quote backend not match')
lcl_2 = builtin_cons(lcl_2, builtin_nil)
lcl_2 = (False, lcl_2)
lcl_1 = lcl_2
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 2):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_2 = _rbnf_cur_token
rbnf_tmp_2 = lcl_2
lcl_2 = (rbnf_tmp_2 is None)
if lcl_2:
lcl_3 = builtin_tokens.offset
lcl_3 = (lcl_3, 'Ident not match')
lcl_3 = builtin_cons(lcl_3, builtin_nil)
lcl_3 = (False, lcl_3)
lcl_2 = lcl_3
else:
lcl_3 = rbnf_named_parse_typeschema(builtin_state, builtin_tokens)
rbnf_named__check_3 = lcl_3
lcl_3 = rbnf_named__check_3[0]
lcl_3 = (lcl_3 == False)
if lcl_3:
lcl_3 = rbnf_named__check_3
else:
lcl_4 = rbnf_named__check_3[1]
rbnf_tmp_3 = lcl_4
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 3):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_4 = _rbnf_cur_token
rbnf_tmp_4 = lcl_4
lcl_4 = (rbnf_tmp_4 is None)
if lcl_4:
lcl_5 = builtin_tokens.offset
lcl_5 = (lcl_5, 'EOF not match')
lcl_5 = builtin_cons(lcl_5, builtin_nil)
lcl_5 = (False, lcl_5)
lcl_4 = lcl_5
else:
lcl_5 = rbnf_tmp_2.value
lcl_5 = (lcl_5, rbnf_tmp_3)
rbnf_tmp_1_ = lcl_5
lcl_5 = (True, rbnf_tmp_1_)
lcl_4 = lcl_5
lcl_3 = lcl_4
lcl_2 = lcl_3
lcl_1 = lcl_2
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_classdef(builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 7):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_0 = lcl_0
lcl_0 = (rbnf_tmp_0 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'quote | not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 2):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_1 = _rbnf_cur_token
rbnf_tmp_1 = lcl_1
lcl_1 = (rbnf_tmp_1 is None)
if lcl_1:
lcl_2 = builtin_tokens.offset
lcl_2 = (lcl_2, 'Ident not match')
lcl_2 = builtin_cons(lcl_2, builtin_nil)
lcl_2 = (False, lcl_2)
lcl_1 = lcl_2
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 8):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_2 = _rbnf_cur_token
rbnf_tmp_2 = lcl_2
lcl_2 = (rbnf_tmp_2 is None)
if lcl_2:
lcl_3 = builtin_tokens.offset
lcl_3 = (lcl_3, 'quote ( not match')
lcl_3 = builtin_cons(lcl_3, builtin_nil)
lcl_3 = (False, lcl_3)
lcl_2 = lcl_3
else:
lcl_3 = rbnf_named_parse_rbnfmacro_1(builtin_state, builtin_tokens)
rbnf_named__check_3 = lcl_3
lcl_3 = rbnf_named__check_3[0]
lcl_3 = (lcl_3 == False)
if lcl_3:
lcl_3 = rbnf_named__check_3
else:
lcl_4 = rbnf_named__check_3[1]
rbnf_tmp_3 = lcl_4
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 9):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_4 = _rbnf_cur_token
rbnf_tmp_4 = lcl_4
lcl_4 = (rbnf_tmp_4 is None)
if lcl_4:
lcl_5 = builtin_tokens.offset
lcl_5 = (lcl_5, 'quote ) not match')
lcl_5 = builtin_cons(lcl_5, builtin_nil)
lcl_5 = (False, lcl_5)
lcl_4 = lcl_5
else:
lcl_5 = rbnf_tmp_1.value
lcl_5 = CaseTypeDef(lcl_5, rbnf_tmp_3)
rbnf_tmp_1_ = lcl_5
lcl_5 = (True, rbnf_tmp_1_)
lcl_4 = lcl_5
lcl_3 = lcl_4
lcl_2 = lcl_3
lcl_1 = lcl_2
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_fieldef(builtin_state, builtin_tokens):
lcl_0 = builtin_tokens.offset
rbnf_named__off_0 = lcl_0
try:
builtin_tokens.array[(builtin_tokens.offset + 0)]
_rbnf_peek_tmp = True
except IndexError:
_rbnf_peek_tmp = False
lcl_0 = _rbnf_peek_tmp
if lcl_0:
lcl_2 = builtin_tokens.array[(builtin_tokens.offset + 0)]
lcl_2 = lcl_2.idint
if (lcl_2 == 2):
lcl_3 = builtin_tokens.offset
rbnf_named__off_1 = lcl_3
try:
builtin_tokens.array[(builtin_tokens.offset + 1)]
_rbnf_peek_tmp = True
except IndexError:
_rbnf_peek_tmp = False
lcl_3 = _rbnf_peek_tmp
if lcl_3:
lcl_5 = builtin_tokens.array[(builtin_tokens.offset + 1)]
lcl_5 = lcl_5.idint
if (lcl_5 == 11):
lcl_6 = rbnf_named_parse_type(builtin_state, builtin_tokens)
rbnf_named__check_0 = lcl_6
lcl_6 = rbnf_named__check_0[0]
lcl_6 = (lcl_6 == False)
if lcl_6:
lcl_6 = rbnf_named__check_0
else:
lcl_7 = rbnf_named__check_0[1]
rbnf_tmp_0 = lcl_7
lcl_7 = FieldDef(None, rbnf_tmp_0)
rbnf_tmp_1_ = lcl_7
lcl_7 = (True, rbnf_tmp_1_)
lcl_6 = lcl_7
lcl_4 = lcl_6
elif (lcl_5 == 10):
_rbnf_old_offset = builtin_tokens.offset
_rbnf_cur_token = builtin_tokens.array[_rbnf_old_offset]
builtin_tokens.offset = (_rbnf_old_offset + 1)
lcl_6 = _rbnf_cur_token
rbnf_tmp_0 = lcl_6
_rbnf_old_offset = builtin_tokens.offset
_rbnf_cur_token = builtin_tokens.array[_rbnf_old_offset]
builtin_tokens.offset = (_rbnf_old_offset + 1)
lcl_6 = _rbnf_cur_token
rbnf_tmp_1 = lcl_6
lcl_6 = rbnf_named_parse_type(builtin_state, builtin_tokens)
rbnf_named__check_2 = lcl_6
lcl_6 = rbnf_named__check_2[0]
lcl_6 = (lcl_6 == False)
if lcl_6:
lcl_6 = rbnf_named__check_2
else:
lcl_7 = rbnf_named__check_2[1]
rbnf_tmp_2 = lcl_7
lcl_7 = rbnf_tmp_0.value
lcl_7 = FieldDef(lcl_7, rbnf_tmp_2)
rbnf_tmp_1_ = lcl_7
lcl_7 = (True, rbnf_tmp_1_)
lcl_6 = lcl_7
lcl_4 = lcl_6
else:
lcl_6 = rbnf_named_parse_type(builtin_state, builtin_tokens)
rbnf_named__check_0 = lcl_6
lcl_6 = rbnf_named__check_0[0]
lcl_6 = (lcl_6 == False)
if lcl_6:
lcl_6 = rbnf_named__check_0
else:
lcl_7 = rbnf_named__check_0[1]
rbnf_tmp_0 = lcl_7
lcl_7 = FieldDef(None, rbnf_tmp_0)
rbnf_tmp_1_ = lcl_7
lcl_7 = (True, rbnf_tmp_1_)
lcl_6 = lcl_7
lcl_4 = lcl_6
lcl_3 = lcl_4
else:
lcl_4 = (rbnf_named__off_1, 'fieldef got EOF')
lcl_4 = builtin_cons(lcl_4, builtin_nil)
lcl_4 = (False, lcl_4)
lcl_3 = lcl_4
lcl_1 = lcl_3
else:
lcl_3 = (rbnf_named__off_0, 'fieldef lookahead failed')
lcl_3 = builtin_cons(lcl_3, builtin_nil)
lcl_3 = (False, lcl_3)
lcl_1 = lcl_3
lcl_0 = lcl_1
else:
lcl_1 = (rbnf_named__off_0, 'fieldef got EOF')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_rbnfmacro_0(builtin_state, builtin_tokens):
lcl_0 = rbnf_named_parse_classdef(builtin_state, builtin_tokens)
rbnf_named__check_0 = lcl_0
lcl_0 = rbnf_named__check_0[0]
lcl_0 = (lcl_0 == False)
if lcl_0:
lcl_0 = rbnf_named__check_0
else:
lcl_1 = rbnf_named__check_0[1]
rbnf_tmp_0 = lcl_1
lcl_1 = []
_rbnf_immediate_lst = lcl_1
_rbnf_immediate_lst.append(rbnf_tmp_0)
lcl_1 = _rbnf_immediate_lst
rbnf_tmp_1_ = lcl_1
lcl_1 = rbnf_named_lr_loop_rbnfmacro_0(rbnf_tmp_1_, builtin_state, builtin_tokens)
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_rbnfmacro_1(builtin_state, builtin_tokens):
lcl_0 = rbnf_named_parse_fieldef(builtin_state, builtin_tokens)
rbnf_named__check_0 = lcl_0
lcl_0 = rbnf_named__check_0[0]
lcl_0 = (lcl_0 == False)
if lcl_0:
lcl_0 = rbnf_named__check_0
else:
lcl_1 = rbnf_named__check_0[1]
rbnf_tmp_0 = lcl_1
lcl_1 = []
_rbnf_immediate_lst = lcl_1
_rbnf_immediate_lst.append(rbnf_tmp_0)
lcl_1 = _rbnf_immediate_lst
rbnf_tmp_1_ = lcl_1
lcl_1 = rbnf_named_lr_loop_rbnfmacro_1(rbnf_tmp_1_, builtin_state, builtin_tokens)
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_rbnfmacro_2(builtin_state, builtin_tokens):
lcl_0 = rbnf_named_parse_type(builtin_state, builtin_tokens)
rbnf_named__check_0 = lcl_0
lcl_0 = rbnf_named__check_0[0]
lcl_0 = (lcl_0 == False)
if lcl_0:
lcl_0 = rbnf_named__check_0
else:
lcl_1 = rbnf_named__check_0[1]
rbnf_tmp_0 = lcl_1
lcl_1 = []
_rbnf_immediate_lst = lcl_1
_rbnf_immediate_lst.append(rbnf_tmp_0)
lcl_1 = _rbnf_immediate_lst
rbnf_tmp_1_ = lcl_1
lcl_1 = rbnf_named_lr_loop_rbnfmacro_2(rbnf_tmp_1_, builtin_state, builtin_tokens)
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_type(builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 2):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_0 = lcl_0
lcl_0 = (rbnf_tmp_0 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'Ident not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
lcl_1 = builtin_tokens.offset
rbnf_named__off_1 = lcl_1
try:
builtin_tokens.array[(builtin_tokens.offset + 0)]
_rbnf_peek_tmp = True
except IndexError:
_rbnf_peek_tmp = False
lcl_1 = _rbnf_peek_tmp
if lcl_1:
lcl_3 = builtin_tokens.array[(builtin_tokens.offset + 0)]
lcl_3 = lcl_3.idint
if (lcl_3 == 11):
_rbnf_old_offset = builtin_tokens.offset
_rbnf_cur_token = builtin_tokens.array[_rbnf_old_offset]
builtin_tokens.offset = (_rbnf_old_offset + 1)
lcl_4 = _rbnf_cur_token
rbnf_tmp_1 = lcl_4
lcl_4 = rbnf_named_parse_rbnfmacro_2(builtin_state, builtin_tokens)
rbnf_named__check_2 = lcl_4
lcl_4 = rbnf_named__check_2[0]
lcl_4 = (lcl_4 == False)
if lcl_4:
lcl_4 = rbnf_named__check_2
else:
lcl_5 = rbnf_named__check_2[1]
rbnf_tmp_2 = lcl_5
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 12):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_5 = _rbnf_cur_token
rbnf_tmp_3 = lcl_5
lcl_5 = (rbnf_tmp_3 is None)
if lcl_5:
lcl_6 = builtin_tokens.offset
lcl_6 = (lcl_6, 'quote ] not match')
lcl_6 = builtin_cons(lcl_6, builtin_nil)
lcl_6 = (False, lcl_6)
lcl_5 = lcl_6
else:
lcl_6 = rbnf_tmp_0.value
lcl_6 = Typ(lcl_6, rbnf_tmp_2)
rbnf_tmp_1_ = lcl_6
lcl_6 = (True, rbnf_tmp_1_)
lcl_5 = lcl_6
lcl_4 = lcl_5
lcl_2 = lcl_4
else:
lcl_4 = rbnf_tmp_0.value
lcl_5 = []
lcl_4 = Typ(lcl_4, lcl_5)
rbnf_tmp_1_ = lcl_4
lcl_4 = (True, rbnf_tmp_1_)
lcl_2 = lcl_4
lcl_1 = lcl_2
else:
lcl_2 = (rbnf_named__off_1, 'type got EOF')
lcl_2 = builtin_cons(lcl_2, builtin_nil)
lcl_2 = (False, lcl_2)
lcl_1 = lcl_2
lcl_0 = lcl_1
return lcl_0
def rbnf_named_parse_typeschema(builtin_state, builtin_tokens):
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 4):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_0 = _rbnf_cur_token
rbnf_tmp_0 = lcl_0
lcl_0 = (rbnf_tmp_0 is None)
if lcl_0:
lcl_1 = builtin_tokens.offset
lcl_1 = (lcl_1, 'quote type not match')
lcl_1 = builtin_cons(lcl_1, builtin_nil)
lcl_1 = (False, lcl_1)
lcl_0 = lcl_1
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 2):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_1 = _rbnf_cur_token
rbnf_tmp_1 = lcl_1
lcl_1 = (rbnf_tmp_1 is None)
if lcl_1:
lcl_2 = builtin_tokens.offset
lcl_2 = (lcl_2, 'Ident not match')
lcl_2 = builtin_cons(lcl_2, builtin_nil)
lcl_2 = (False, lcl_2)
lcl_1 = lcl_2
else:
try:
_rbnf_cur_token = builtin_tokens.array[builtin_tokens.offset]
if (_rbnf_cur_token.idint is 5):
builtin_tokens.offset += 1
else:
_rbnf_cur_token = None
except IndexError:
_rbnf_cur_token = None
lcl_2 = _rbnf_cur_token
rbnf_tmp_2 = lcl_2
lcl_2 = (rbnf_tmp_2 is None)
if lcl_2:
lcl_3 = builtin_tokens.offset
lcl_3 = (lcl_3, 'quote = not match')
lcl_3 = builtin_cons(lcl_3, builtin_nil)
lcl_3 = (False, lcl_3)
lcl_2 = lcl_3
else:
lcl_3 = rbnf_named_parse_rbnfmacro_0(builtin_state, builtin_tokens)
rbnf_named__check_3 = lcl_3
lcl_3 = rbnf_named__check_3[0]
lcl_3 = (lcl_3 == False)
if lcl_3:
lcl_3 = rbnf_named__check_3
else:
lcl_4 = rbnf_named__check_3[1]
rbnf_tmp_3 = lcl_4
lcl_4 = rbnf_tmp_1.value
lcl_4 = TypeSchema(lcl_4, rbnf_tmp_3)
rbnf_tmp_1_ = lcl_4
lcl_4 = (True, rbnf_tmp_1_)
lcl_3 = lcl_4
lcl_2 = lcl_3
lcl_1 = lcl_2
lcl_0 = lcl_1
return lcl_0
return rbnf_named_parse_START | [
"[email protected]"
] | |
f10558c3ce38710d78fcfffc21dda605820129e7 | eae4038397ea0b0b1ea56424888f53369a1e4282 | /moai/parameters/optimization/optimizers/adam_srt.py | 3c7177f5e0e17de45e0f93db114d5f7383798fe5 | [
"Apache-2.0"
] | permissive | iampakos/moai-0.1.0a2 | b2378e9e0a84b85c0e2251a419d39d3da7ea17f9 | 2f066bffc66faca0bdc9af53e7992df34d09ce5d | refs/heads/main | 2023-03-13T13:27:54.318498 | 2021-04-01T14:36:52 | 2021-04-01T14:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,401 | py | import functools
import math
import torch
import typing
__all__ = ['AdamSRT', 'AdamS']
#NOTE: from https://github.com/ymontmarin/adamsrt/blob/master/adamsrt/optimizers/adam_srt.py
class AdamSRT(torch.optim.Optimizer):
"""Implements the AdamSRT algorithm.
- **Paper**: [Spherical Perspective on Learning with Batch Norm. New methods : AdamSRT](https://arxiv.org/pdf/2006.13382.pdf)
- **Implementation**: [GitHub @ ymontmarin](https://github.com/ymontmarin/adamsrt)
    General version of Adam-SRT that works with different normalization layers
    when specific channel options (channel_dims, channel_wise, channel_global)
    are given.
    It should be used on parameters that are subject to scale invariance
    because they are followed by a normalization layer.
    Because not all params are concerned, PyTorch parameter groups
    should be used.
    The effect is to adapt the moments of Adam to the geometry implied by the
    normalization layer: RT transforms the first-order moment, and the
    second-order moment is rescaled and tracked per channel as a norm.
Example:
        >>> par_groups = [{'params': model.conv_params(), 'channel_wise': True},
>>> {'params': model.other_params()}]
>>> optimizer = AdamSRT(par_groups, lr=0.01, betas=(0.9, 0.9999))
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
Arguments:
        params (list of dict or iterator): either a list of parameter groups
            (each a dict with key 'params' holding a param iterator, plus other
            keys overriding optimizer parameters for that group) or a plain
            iterator of parameters,
        lr (float): learning rate,
        betas (tuple(float)): momentum factors for the Adam moments,
        eps (float): float to avoid numerical instability in normalization,
        weight_decay (float): value for L2 regularization,
        channel_dims (list of int): the indices of the shape that represent the
            distinct dims that are independently normalized. The default,
            channel_dims=shape, corresponds to classic Adam.
            It can be used to adapt Adam to any normalization layer that
            follows conv layers,
        channel_wise (bool): if True and channel_dims is None, set it to [0],
            which corresponds to the classic channel dim of a 2D conv network.
            Normalization will be done over the other dims, which are subject
            to scale invariance thanks to the following normalization layer,
"""
def __init__(self,
params: typing.Iterator[torch.nn.Parameter],
lr: float=1e-3,
betas: typing.Tuple[float, float]=(0.9, 0.999),
eps: float=1e-8,
weight_decay: float=0,
                 channel_dims=None, # To customize the dimensions for a group of params
                 channel_wise: bool=True, # Default: conv weights followed by BN (scale invariance)
rt: bool=True
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
channel_dims=channel_dims,
channel_wise=channel_wise,
rt=rt,
)
super(AdamSRT, self).__init__(params, defaults)
def step(self):
"""
        Performs a single optimization step.
"""
for group in self.param_groups:
for p in group['params']:
# Get grad of params to update
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'RMSprop does not support sparse gradients'
)
if group['weight_decay'] != 0.:
grad.add_(p.data, alpha=group['weight_decay'])
# Get state
state = self.state[p]
# State initialization if needed
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Create the scalar product in respect of channel dims
shape = p.data.shape
channel_dims = group['channel_dims']
if channel_dims is None:
if group['channel_wise']:
# Classic meaning of channels
channel_dims = [0]
else:
# element wise : every element is a channel
                            # It corresponds to the classic Adam update
channel_dims = list(range(len(shape)))
state['channel_dims'] = channel_dims
state['shape'] = shape
# Start by increment step
state['step'] += 1
# Create the appropriate dot operator for the invar groups
dot_ope = self.get_dot_operator(
state['channel_dims'], state['shape']
)
                # Retrieve moments and constants
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
b1, b2 = group['betas']
# Update momentums
exp_avg.mul_(b1).add_(grad, alpha=1 - b1)
exp_avg_sq.mul_(b2).add_(dot_ope(grad, grad), alpha=1 - b2)
# It should be d^-1 * (1 - beta2) instead
# To avoid double div with > 1 we multiply stepsize by d^1/2
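                # (exp_avg_sq holds the per-channel *sum* of ||g||^2 over the
                # d = dot_ope.dim group elements rather than the mean; since
                # sqrt(sum) = sqrt(d) * sqrt(mean), multiplying the step size
                # by sqrt(d) below recovers the intended Adam-style update.)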
# Compute bias correction
bias_correction1 = 1 - b1 ** state['step']
bias_correction2 = 1 - b2 ** state['step']
# Compute actual nom and denom for the step
nom = exp_avg / bias_correction1
denom = (
exp_avg_sq.sqrt() / math.sqrt(bias_correction2)
).add_(group['eps'])
                # Prepare a temporary copy of the data for the RT transform if needed
if dot_ope.dim > 1 and group['rt']:
prev_data = p.data.clone().detach()
                # Take the step on the data
step_size = group['lr'] * math.sqrt(dot_ope.dim)
p.data.addcdiv_(nom, denom, value=-step_size)
# We are on a sphere, we do RT transform
if dot_ope.dim > 1 and group['rt']:
new_data = p.data
prev_norm_sq = dot_ope(prev_data, prev_data)
new_norm_sq = dot_ope(new_data, new_data)
scal_x1_x2 = dot_ope(prev_data, new_data)
scal_m_x2 = dot_ope(exp_avg, new_data)
# R order 2 moment
(
exp_avg_sq
.mul_(prev_norm_sq)
.div_(new_norm_sq + group['eps'])
)
# RT the order 1 moment
(
exp_avg
.mul_(scal_x1_x2)
.add_(-scal_m_x2 * prev_data)
.div_(new_norm_sq + group['eps'])
)
@staticmethod
def get_dot_operator(channel_dims, shape):
"""
        Generate a function that does a scalar product for each channel dim
        over the remaining dims
"""
# Other dims are the ones of groups of elem for each channel
grp_dims = list(set(range(len(shape))) - set(channel_dims))
# Compute shape and size
channel_shape = [shape[i] for i in channel_dims]
grp_shape = [shape[i] for i in grp_dims]
channel_size = functools.reduce(lambda x, y: x * y, [1] + channel_shape)
grp_size = functools.reduce(lambda x, y: x * y, [1] + grp_shape)
        # Prepare the permutation that orders dims, and its reciprocal
perm = channel_dims + grp_dims
antiperm = [
e[1]
for e in sorted([(j, i) for i, j in enumerate(perm)])
]
        # Prepare an index query that retrieves all dimensions
slice_len = max(len(channel_shape), 1)
idx = [slice(None)] * slice_len + [None] * (len(shape) - slice_len)
# Define the scalar product channel wise over grp dims
        # The output is extended to fit the initial shape
def scalar_product(tensor1, tensor2):
return (
(tensor1 * tensor2)
.permute(perm) # permute as chan_dims, grp_dims
.contiguous()
.view(channel_size, grp_size) # view as 2 dims tensor
.sum(dim=1) # norm over group dims to have scalar
.view(*(channel_shape if channel_shape else [-1]))
[idx] # restore channel shape and extend on grp dims
.permute(antiperm) # Reverse permute to retrieve shape
.contiguous()
)
scalar_product.dim = grp_size
return scalar_product
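# Illustrative sketch (not part of the original library): what the channel-wise
# dot operator computes for a conv weight of shape [C_out, C_in, k, k] when
# channel_dims=[0]. It is documentation only and is never called.
def _demo_dot_operator():
    w = torch.randn(8, 3, 3, 3)
    dot = AdamSRT.get_dot_operator([0], w.shape)
    sq_norms = dot(w, w)  # squared norm of each output channel's group
    assert sq_norms.shape == (8, 1, 1, 1)  # broadcastable back onto w
    assert dot.dim == 3 * 3 * 3  # number of elements in each channel group
    return sq_norms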
class AdamS(AdamSRT):
def __init__(self,
params: typing.Iterator[torch.nn.Parameter],
lr: float=1e-3,
betas: typing.Tuple[float, float]=(0.9, 0.999),
eps: float=1e-8,
weight_decay: float=0,
                 channel_dims=None, # To customize the dimensions for a group of params
                 channel_wise: bool=True, # Default: conv weights followed by BN (scale invariance)
):
super(AdamS, self).__init__(
params,
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
channel_dims=channel_dims,
channel_wise=channel_wise,
rt=False # Never do RT
) | [
"[email protected]"
] | |
afb02e97b0e4004e14d9c672ec1972d124005491 | f1bff0e018463081513c30258a67f238f5d08396 | /finalizing.py | b5105ab4545ed0f1ea81bf1665dca4f52c311693 | [] | no_license | bellyfat/speaker-Identification | cfcedd86ea634d5df19e560acea250c5b8dbc5d0 | 34c9ce12c6400f116e04a0d1be75e0e79228d599 | refs/heads/master | 2022-04-07T12:23:59.446618 | 2019-08-26T07:59:44 | 2019-08-26T07:59:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from recoder import *
import glob, os
import time  # used by getAudio()
import wave
import pylab
def training():
get_model()
print("training complete....")
# getting the audio
def getAudio():
rec = Recorder()
print("Start recording")
rec.start()
time.sleep(11)
print("Stop recording")
rec.stop()
print("Saving")
rec.save("test.wav")
#getAudio()
def get_file(path):
#path = 'wav_file'
name = os.path.basename(path)
filename, file_extension = os.path.splitext(name)
return filename
def graph_spectrogram(wav_file):
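    # render the recording as a spectrogram image and save it as "<name>.png"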
sound_info, frame_rate = get_wav_info(wav_file)
pylab.figure(num=None, figsize=(19, 12))
pylab.subplot(111)
pylab.title('spectrogram of %r' % wav_file)
pylab.specgram(sound_info, Fs=frame_rate)
pylab.savefig(get_file(wav_file)+".png")
def get_wav_info(wav_file):
wav = wave.open(wav_file, 'r')
frames = wav.readframes(-1)
    sound_info = pylab.frombuffer(frames, 'int16')  # fromstring is deprecated for raw bytes
frame_rate = wav.getframerate()
wav.close()
return sound_info, frame_rate
def create_img():
graph_spectrogram("test.wav")
print("img creeated")
def delete_wav(file):
if os.path.exists(file):
os.remove(file)
print("file deleted")
else:
print("The file does not exist")
def delt():
file_name = "test.wav"
delete_wav(file_name)
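# Illustrative sketch (not part of the original script): the helpers above are
# meant to be chained roughly in this order.
def run_pipeline():
    getAudio()      # record ~11 seconds of audio to test.wav
    create_img()    # render and save the spectrogram as test.png
    delt()          # remove the temporary test.wav afterwards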
| [
"[email protected]"
] |