blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f40417e1b3d5e7727b23349015224819e159c34
|
d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d
|
/test/test_payment_method_payment_schedules_request.py
|
452cc8ec29e8473df8ec6a5a8e0ae80b14d7d5f7
|
[] |
no_license
|
begum-akbay/Python
|
2075650e0ddbf1c51823ebd749742646bf221603
|
fe8b47e29aae609b7510af2d21e53b8a575857d8
|
refs/heads/master
| 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 |
Python
|
UTF-8
|
Python
| false | false | 1,270 |
py
|
# coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.payment_method_payment_schedules_request import PaymentMethodPaymentSchedulesRequest # noqa: E501
from openapi_client.rest import ApiException
class TestPaymentMethodPaymentSchedulesRequest(unittest.TestCase):
    """PaymentMethodPaymentSchedulesRequest unit test stubs (OpenAPI-generated)."""
    def setUp(self):
        # No fixtures needed yet; generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testPaymentMethodPaymentSchedulesRequest(self):
        """Test PaymentMethodPaymentSchedulesRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.payment_method_payment_schedules_request.PaymentMethodPaymentSchedulesRequest()  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
8147523bcb0f515c279cdd116378042b0911fd7c
|
56e469a1bfd29004fa258a54668dfbbc4459663d
|
/python3-nltk-tutorial/src/lesson2.py
|
eea468d14140f4c269abb2552dfb9c86ded6c8b6
|
[] |
no_license
|
wind86/learning
|
bfce4a6795b58b27d0148b878299cacfe96aa26f
|
4449ba0eed0a8f803a2bb9fbd663faf43148f03a
|
refs/heads/master
| 2020-04-05T23:28:40.082439 | 2017-11-04T11:36:40 | 2017-11-04T11:36:40 | 83,236,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 634 |
py
|
'''
Created on Apr 09, 2017
Stop words with NLTK
https://www.youtube.com/watch?v=w36-U-ccajM&index=2&list=PLQVvvaa0QuDf2JswnfiGkliBInZnIC4HL
@author: ubuntu
'''
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Sample sentence to filter.
example_sent = "This is a sample sentence, showing off the stop words filtration."

# NLTK's English stop-word list; a set gives O(1) membership tests.
stop_words = set(stopwords.words('english'))

word_tokens = word_tokenize(example_sent)

# Keep only tokens that are not stop words.  (The original built this list
# twice: a comprehension whose result was immediately discarded, then an
# explicit append loop -- a single comprehension produces the same output.)
filtered_sentence = [w for w in word_tokens if w not in stop_words]

print(word_tokens)
print(filtered_sentence)
|
[
"[email protected]"
] | |
ed0a7a587fa699bb3e21e4116d874fda8a2c2d5c
|
3337e9150a743e0df2898528dd1e4dfac9730b25
|
/artemis/fileman/persistent_print.py
|
13b30ccc07235563122878b4675f41b117e62124
|
[] |
no_license
|
ml-lab/artemis
|
f3353cb462b06d64e1007010db94667b4703c90e
|
b4f5f627f1798aff90b845d70fd582142a9f76c8
|
refs/heads/master
| 2021-01-22T06:49:41.346341 | 2017-09-01T15:31:13 | 2017-09-01T15:31:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,171 |
py
|
import sys
from artemis.fileman.local_dir import get_artemis_data_path
from artemis.general.display import CaptureStdOut
__author__ = 'peter'
"""
Save Print statements:
Useful in ipython notebooks where you lose output when printing to the browser.
On advice from:
http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python
** Note this is no longer being used. Possibly delete
"""
# Originals saved at import time so the capture can be undone later.
_ORIGINAL_STDOUT = sys.stdout
_ORIGINAL_STDERR = sys.stderr


def capture_print(log_file_path = 'logs/dump/%T-log.txt', print_to_console=True):
    """
    Start capturing stdout/stderr into a log file (until stop_capturing_print).

    :param log_file_path: Path of file to print to.  If path does not start with a "/", it will
        be relative to the data directory.  You can use placeholders such as %T, %R, ... in the
        path name (see format filename).
    :param print_to_console: Also continue printing to console.
    :return: The absolute path to the log file.
    """
    local_log_file_path = get_artemis_data_path(log_file_path)
    logger = CaptureStdOut(log_file_path=local_log_file_path, print_to_console=print_to_console)
    # Enter the context manager manually -- capture stays on until explicitly stopped.
    logger.__enter__()
    sys.stdout = logger
    sys.stderr = logger
    return local_log_file_path
def stop_capturing_print():
    """Restore sys.stdout/sys.stderr to the streams saved at import time."""
    sys.stdout = _ORIGINAL_STDOUT
    sys.stderr = _ORIGINAL_STDERR
def new_log_file(log_file_path = 'dump/%T-log', print_to_console = False):
    """
    Just capture-print with different defaults - intended to be called from notebooks where
    you don't want all output printed, but want to be able to see it with a link.

    :param log_file_path: Path to the log file - %T is replaced with time
    :param print_to_console: True to continue printing to console
    :return: The absolute path to the log file.
    """
    return capture_print(log_file_path=log_file_path, print_to_console=print_to_console)
def read_print():
    """Read back the captured output; assumes sys.stdout is a CaptureStdOut (see capture_print)."""
    return sys.stdout.read()
def reprint():
    # Re-echo everything captured so far to the real stdout (Python 2 print syntax).
    assert isinstance(sys.stdout, CaptureStdOut), "Can't call reprint unless you've turned on capture_print"
    # Need to avoid exponentially growing prints: temporarily swap the real
    # stdout back in so the replay itself is not captured again.
    current_stdout = sys.stdout
    sys.stdout = _ORIGINAL_STDOUT
    print read_print()
    sys.stdout = current_stdout
|
[
"[email protected]"
] | |
c367f874817b32c6f63cee71858c33cc30dede45
|
5d0fe4a9e026234fe15e6c4380355061bb4dac64
|
/tests/functional/pages/profile/individual_enter_your_personal_details.py
|
53c55f143ecca632274757bbfec1c6127897fa4a
|
[
"MIT"
] |
permissive
|
uktrade/directory-tests
|
37e243862da8ac594cf1ea06ade714db5e1aba03
|
39ec6c26203580238e65566a472cbd80916e6726
|
refs/heads/master
| 2022-08-09T16:58:56.248982 | 2022-08-01T12:25:10 | 2022-08-01T12:25:10 | 71,367,747 | 4 | 3 |
MIT
| 2022-08-01T12:26:09 | 2016-10-19T14:48:57 |
Python
|
UTF-8
|
Python
| false | false | 1,572 |
py
|
# -*- coding: utf-8 -*-
"""Profile - Enter your personal details"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
# Page descriptor consumed by the functional-test framework.
SERVICE = Service.PROFILE
NAME = "Enter your individual details"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_PERSONAL_DETAILS.absolute
# Fragments that must all appear in the response body (see should_be_here).
EXPECTED_STRINGS = [
    "Enter your personal details",
    "First name",
    "Last name",
    "Job title",
    "Phone number (optional)",
]
def go_to(session: Session) -> Response:
    """GET this page using the given requests session."""
    return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
    """Assert that `response` is this page: expected URL, HTTP 200, expected copy."""
    check_url(response, URL)
    check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor) -> Response:
    """POST the personal-details enrolment wizard step on behalf of `actor`."""
    session = actor.session
    headers = {"Referer": URL}
    data = {
        "csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
        "individual_user_enrolment_view-current_step": "personal-details",
        "personal-details-given_name": actor.alias,
        "personal-details-family_name": "AUTOMATED TESTS",
        "personal-details-job_title": "DIT AUTOMATED TESTS",
        "personal-details-phone_number": "0987654321",
        "personal-details-terms_agreed": "on",
    }
    # Sent as multipart/form-data (files=) -- presumably what this backend
    # expects for wizard steps; note no_filename_in_multipart_form_data.
    return make_request(
        Method.POST,
        URL,
        session=session,
        headers=headers,
        files=data,
        no_filename_in_multipart_form_data=True,
    )
|
[
"[email protected]"
] | |
6e3f7646454551de97bff7229a6e4a0d163b2856
|
ca231a325e8f4c18d50d89ffa7eec993d4cc68c3
|
/codility/minimal_interger_not_ocurrs.py
|
4f9b4ac785566637a02e89df334015135a5bb335
|
[] |
no_license
|
HugoPorto/PythonCodes
|
8e1597999ccd34ffa86df5ae7e91111d77dc7a22
|
539ad6891cbd49a2c011349f843ab710aad2993a
|
refs/heads/master
| 2022-02-13T05:48:24.633750 | 2017-09-12T15:44:06 | 2017-09-12T15:44:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 853 |
py
|
# -*- coding:utf-8 -*-
def solution(A):
    ''' Return the minimal positive integer (>= 1) not occurring in A.

    Solve it with the Pigeonhole principle: there are N integers in the
    input, so among the first N+1 positive integers at least one must be
    missing -- only 1..N+1 need to be tracked.

    :param A: list of ints (may contain negatives and duplicates).
    :return: the smallest missing positive integer.
    '''
    # occurrence[i] is True iff the integer i+1 appears in A.
    occurrence = [False] * (len(A) + 1)
    for item in A:
        if 1 <= item <= len(A) + 1:
            occurrence[item - 1] = True
    # First False slot marks the missing minimal positive integer.
    # (range, not xrange: works on both Python 2 and Python 3.)
    for index in range(len(A) + 1):
        if occurrence[index] == False:
            return index + 1
    # Unreachable: the pigeonhole argument guarantees a False slot above
    # (the dead `return -1` that followed this raise has been removed).
    raise Exception("Should never be here.")


assert solution([-1]) == 1
assert solution([1, 3, 6, 4, 1, 2]) == 5
assert solution([1]) == 2
assert solution([-1, 0, 1, 3]) == 2
assert solution([-1, 0, 1, 2]) == 3
|
[
"[email protected]"
] | |
12ccbb6a49dc123cca42202409efb9bb333f2c8c
|
a135e6aebb4b525d090272c107d9986ed50ec919
|
/grip/__init__.py
|
263bab0ee2649d40658a1dc3023c1a3e0b27c6d5
|
[
"MIT"
] |
permissive
|
wemersondev/grip
|
2a6740d32e045cfa6639936d6640555ea81d3b53
|
8a9d7caf2f8a7cf07d8b31e030600404b4c498c7
|
refs/heads/master
| 2021-01-24T03:26:40.071776 | 2018-02-25T19:58:13 | 2018-02-25T19:58:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,663 |
py
|
"""\
Grip
----
Render local readme files before sending off to GitHub.
:copyright: (c) 2014-2016 by Joe Esposito.
:license: MIT, see LICENSE for more details.
"""
__version__ = '4.4.0'

import sys

# Patch for Flask 11.0+ on Python 3 (pypy3): provide a no-op sys.exc_clear
# shim where the interpreter does not define one (it was removed in Python 3).
if not hasattr(sys, 'exc_clear'):
    sys.exc_clear = lambda: None
from .api import (
clear_cache, create_app, export, render_content, render_page, serve)
from .app import Grip
from .assets import GitHubAssetManager, ReadmeAssetManager
from .command import main
from .constants import (
DEFAULT_API_URL, DEFAULT_FILENAMES, DEFAULT_FILENAME, DEFAULT_GRIPHOME,
DEFAULT_GRIPURL, STYLE_ASSET_URLS_INLINE_FORMAT, STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT, STYLE_URLS_RE, STYLE_URLS_SOURCE,
SUPPORTED_EXTENSIONS, SUPPORTED_TITLES)
from .exceptions import AlreadyRunningError, ReadmeNotFoundError
from .readers import ReadmeReader, DirectoryReader, StdinReader, TextReader
from .renderers import ReadmeRenderer, GitHubRenderer, OfflineRenderer
# Public package API -- keep in sync with the re-export imports above.
__all__ = [
    '__version__',
    'DEFAULT_API_URL', 'DEFAULT_FILENAMES', 'DEFAULT_FILENAME',
    'DEFAULT_GRIPHOME', 'DEFAULT_GRIPURL', 'STYLE_ASSET_URLS_INLINE_FORMAT',
    'STYLE_ASSET_URLS_RE', 'STYLE_ASSET_URLS_SUB_FORMAT', 'STYLE_URLS_RE',
    'STYLE_URLS_SOURCE', 'SUPPORTED_EXTENSIONS', 'SUPPORTED_TITLES',
    'AlreadyRunningError', 'DirectoryReader', 'GitHubAssetManager',
    'GitHubRenderer', 'Grip', 'OfflineRenderer', 'ReadmeNotFoundError',
    'ReadmeAssetManager', 'ReadmeReader', 'ReadmeRenderer', 'StdinReader',
    'TextReader',
    'clear_cache', 'create_app', 'export', 'main', 'render_content',
    'render_page', 'serve',
]
|
[
"[email protected]"
] | |
18df10d8b1c09bf6663d3185bce769d2c532a8f7
|
8c6816435093cb8e9e45593d3ffdd67028a011b6
|
/tests/test_is_palindrome_permutation.py
|
8afe1e3ee3486b7078ef4211c354a84d7504048b
|
[] |
no_license
|
Keeady/daily-coding-challenge
|
6ee74a5fe639a1f5b4753dd4848d0696bef15c28
|
31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94
|
refs/heads/master
| 2020-03-27T07:58:05.713290 | 2019-03-08T15:03:05 | 2019-03-08T15:03:05 | 146,210,027 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 586 |
py
|
from String import is_palindrome_permutation
def test_is_palindrome_permutation():
    """Exercise is_palindrome_permutation on mixed-case, spaced inputs.

    Fix: the original had a stray trailing backslash (line continuation)
    after the third assert, which glued the following assignment onto the
    assert -- a SyntaxError.  Local renamed text (was `str`) to stop
    shadowing the builtin.
    """
    text = 'Tact Coa'
    assert True == is_palindrome_permutation.is_palindrome_permutation(text)
    text = 'Tact oCoa'
    assert True == is_palindrome_permutation.is_palindrome_permutation(text)
    text = 'Tact Ca'
    assert True == is_palindrome_permutation.is_palindrome_permutation(text)
    text = 'Duck Duck Go'
    assert False == is_palindrome_permutation.is_palindrome_permutation(text)
    text = 'tactcoapapa'
    assert True == is_palindrome_permutation.is_palindrome_permutation(text)
|
[
"[email protected]"
] | |
83ebf96ed9d709453f2542d0921655ff7857ce40
|
caf135d264c4c1fdd320b42bf0d019e350938b2d
|
/04_Algorithms/Leetcode/L24_Swap Nodes in Pairs.py
|
eba7c0bc0a8f2006110eb82a2b8a1604aa56fe07
|
[] |
no_license
|
coolxv/DL-Prep
|
4243c51103bdc38972b8a7cbe3db4efa93851342
|
3e6565527ee8479e178852fffc4ccd0e44166e48
|
refs/heads/master
| 2022-12-31T22:42:20.806208 | 2020-10-23T10:19:19 | 2020-10-23T10:19:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 807 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes of a singly linked list, in place.

        Pure pointer rewiring -- node values are never copied.
        Returns the new head.  O(n) time, O(1) extra space.
        """
        if not head or not head.next:
            # Zero or one node: nothing to swap.
            return head
        else:
            # Swap the first pair by hand so `head` can be repointed at the new front.
            first = head
            second = first.next
            afternode = second.next  # start of the not-yet-processed suffix
            head = second
            head.next = first
            first.next = afternode
            # Walk the rest two nodes at a time; `first` is always the tail of
            # the already-swapped prefix when a new pair is taken.
            while afternode and afternode.next:
                prevnode = first
                first,second = afternode,afternode.next
                afternode = second.next
                # Link the previous pair's tail to the swapped pair, then swap.
                prevnode.next = second
                second.next = first
                first.next = afternode
            return head
|
[
"[email protected]"
] | |
f851895535c8f43ebe64751ebaf22d82378cf452
|
1e0b77feea4aa08f2aa9ff63feddbc818428a350
|
/script/dedecms/dedecms_win_find_manage.py
|
77efcee0ec9487364ba143234992930c3a5232e7
|
[] |
no_license
|
cleanmgr112/Tentacle
|
838b915430166429da3fe4ed290bef85d793fae4
|
175e143fc08d1a6884a126b7da019ef126e116fa
|
refs/heads/master
| 2022-12-08T06:36:28.706843 | 2020-08-26T14:06:35 | 2020-08-26T14:06:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,769 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: 'orleven'
import itertools
from lib.utils.connect import ClientSession
from lib.core.enums import VUL_LEVEL
from lib.core.enums import VUL_TYPE
from lib.core.enums import SERVICE_PORT_MAP
from script import Script
class POC(Script):
    """Brute-force the renamed DedeCMS admin directory via the tags.php
    upload-error oracle (Windows path semantics).  Technique write-up:
    https://xz.aliyun.com/t/2064 (see self.refer)."""
    def __init__(self, target=None):
        self.service_type = SERVICE_PORT_MAP.WEB
        self.name = 'dedecms win manager'
        self.keyword = ['dedecms', 'win', 'manager']
        self.info = 'Find manager for dedecms'
        self.type = VUL_LEVEL.MEDIUM
        self.level = VUL_LEVEL.INFO
        self.refer = 'https://xz.aliyun.com/t/2064'
        Script.__init__(self, target=target, service_type=self.service_type)
    async def prove(self):
        # Resolve self.base_url / self.url for the target (provided by Script).
        await self.get_url()
        if self.base_url:
            # Alphabet of characters a DedeCMS admin directory name may contain.
            characters = "abcdefghijklmnopqrstuvwxyz0123456789_!#"
            # Malformed multipart fields: the "<<" in tmp_name makes tags.php
            # error differently depending on whether the guessed directory
            # prefix exists -- that difference is the brute-force oracle.
            _data = {
                "_FILES[mochazz][tmp_name]": "./{p}<</images/adminico.gif",
                "_FILES[mochazz][name]": 0,
                "_FILES[mochazz][size]": 0,
                "_FILES[mochazz][type]": "image/gif"
            }
            # Candidate install locations relative to the target URL (de-duplicated).
            path_list = list(set([
                self.url_normpath(self.base_url, '/'),
                self.url_normpath(self.base_url, '../dedecms/'),
                self.url_normpath(self.url, 'dedecms/'),
                self.url_normpath(self.url, '../dedecms/'),
            ]))
            async with ClientSession() as session:
                for path in path_list:
                    url = path + 'tags.php'
                    back_dir = ""
                    flag = 0
                    async with session.get(url=url) as res:
                        if res!=None and res.status ==200:
                            # Stage 1: guess the first 1..6 characters of the
                            # directory name by trying every permutation.
                            for num in range(1, 7):
                                if flag ==1 :
                                    break
                                for pre in itertools.permutations(characters, num):
                                    pre = ''.join(list(pre))
                                    _data["_FILES[mochazz][tmp_name]"] = _data["_FILES[mochazz][tmp_name]"].format(p=pre)
                                    async with session.post(url=url, data=_data) as r:
                                        if r!=None:
                                            if r.status == 405:
                                                # POST not allowed -- oracle unusable; abort.
                                                return
                                            text = await r.text()
                                            if "Upload filetype not allow !" not in text and r.status == 200:
                                                # Oracle hit: `pre` is a valid prefix.
                                                flag = 1
                                                back_dir = pre
                                                _data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
                                                break
                                            else:
                                                # Reset the template for the next guess.
                                                _data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
                            # Stage 2: extend the confirmed prefix one character
                            # at a time (up to 30 more characters).
                            flag = 0
                            x = 0
                            for i in range(30):
                                if flag == 1:
                                    x = i
                                    break
                                for ch in characters:
                                    if ch == characters[-1]:
                                        # Exhausted the alphabet: name is complete.
                                        flag = 1
                                        x = i
                                        break
                                    _data["_FILES[mochazz][tmp_name]"] = _data["_FILES[mochazz][tmp_name]"].format(p=back_dir + ch)
                                    async with session.post(url=url, data=_data) as r:
                                        if r!=None:
                                            if r.status == 405:
                                                return
                                            text = await r.text()
                                            if "Upload filetype not allow !" not in text and r.status == 200:
                                                back_dir += ch
                                                _data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
                                                break
                                            else:
                                                _data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
                            if x < 29 and flag ==1:
                                # Record the discovered admin directory.
                                self.flag = 1
                                self.req.append({"url": path+ '/'+back_dir})
                                self.res.append({"info": path+'/'+ back_dir, "key": 'dede_manager'})
|
[
"[email protected]"
] | |
fd665f4ee1a672d4be5eb93dc6f5a52a578af62d
|
cf297c3d66189d2bd9fd8bfdadaeff3ebe6eee05
|
/WebBrickLibs/EventHandlers/tests/DummyRouter.py
|
aeb6ebe6d84716938a3c453ac113956c324b0805
|
[
"BSD-3-Clause"
] |
permissive
|
AndyThirtover/wb_gateway
|
0cb68a1f2caf7f06942f94b867ea02f4f8695492
|
69f9c870369085f4440033201e2fb263a463a523
|
refs/heads/master
| 2022-01-19T00:07:20.456346 | 2022-01-05T21:08:16 | 2022-01-05T21:08:16 | 14,687,973 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: DummyRouter.py 2612 2008-08-11 20:08:49Z graham.klyne $
#
# Some test helpers for testing event handlers. Uses a SuperGlobal to save state.
#
import logging
import sys
import unittest
from EventLib.Event import Event, makeEvent
from EventHandlers.BaseHandler import *
# a dummy router to log data
class DummyRouter(object):
    """Stand-in event router for handler tests.

    Every subscribe/unsubscribe/publish call is recorded in a list so a
    test can inspect what a handler did; each call is also logged at DEBUG.
    """

    def __init__(self):
        self._log = logging.getLogger("DummyRouter")
        self._subs = []
        self._unsubs = []
        self._pubs = []

    def logMe(self):
        """Write a marker entry to the log."""
        self._log.debug("logMe")

    def subscribe(self, interval, handler, evtype=None, source=None):
        """Record a subscription request."""
        entry = (interval, handler, evtype, source)
        self._subs.append(entry)
        self._log.debug("subscribe: %i, %s, %s, %s" % entry)

    def unsubscribe(self, handler, evtype=None, source=None):
        """Record an unsubscription request."""
        entry = (handler, evtype, source)
        self._unsubs.append(entry)
        self._log.debug("unsubscribe: %s, %s, %s" % entry)

    def publish(self, source, event):
        """Record a published event."""
        entry = (source, event)
        self._pubs.append(entry)
        self._log.debug("publish: %s, %s" % entry)
|
[
"[email protected]"
] | |
5c22c50092409f049081caf5752155a483abf51f
|
6656c2acc607d269870d04d310e8a35ebbad8d3f
|
/lib/python2.7/dist-packages/pr2_mechanism_controllers/msg/_Odometer.py
|
3a8c8d7ac9f8a3aacb42386f5ce327b54bf4e2bf
|
[] |
no_license
|
uml-comp4510-5490/install
|
97bd8b643773e34f3956e40ac169729a45e34bbe
|
2897bf668177aced2e58cac18e86b109716c01df
|
refs/heads/master
| 2020-04-01T05:59:56.541628 | 2018-10-14T01:52:57 | 2018-10-14T01:52:57 | 152,929,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,835 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pr2_mechanism_controllers/Odometer.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Odometer(genpy.Message):
    # Autogenerated by genpy from pr2_mechanism_controllers/Odometer.msg --
    # do not hand-edit the message logic; regenerate from the .msg instead.
    _md5sum = "1f1d53743f4592ee455aa3eaf9019457"
    _type = "pr2_mechanism_controllers/Odometer"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """float64 distance #total distance traveled (meters)
float64 angle #total angle traveled (radians)"""
    __slots__ = ['distance','angle']
    _slot_types = ['float64','float64']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
            distance,angle
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(Odometer, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.distance is None:
                self.distance = 0.
            if self.angle is None:
                self.angle = 0.
        else:
            self.distance = 0.
            self.angle = 0.

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # Both float64 fields packed in one call with the cached '<2d' Struct.
            buff.write(_get_struct_2d().pack(_x.distance, _x.angle))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            _x = self
            start = end
            # 16 bytes = two little-endian float64s (distance, angle).
            end += 16
            (_x.distance, _x.angle,) = _get_struct_2d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self
            buff.write(_get_struct_2d().pack(_x.distance, _x.angle))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0
            _x = self
            start = end
            end += 16
            (_x.distance, _x.angle,) = _get_struct_2d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy Struct for a single unsigned int ('I').
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily-created cache for the two-little-endian-doubles Struct.
_struct_2d = None
def _get_struct_2d():
    """Return the cached Struct("<2d"), creating it on first use."""
    global _struct_2d
    _struct_2d = _struct_2d or struct.Struct("<2d")
    return _struct_2d
|
[
"[email protected]"
] | |
61f7e1110562904492dddc8c101dfdb04a9f0b79
|
2009735d19318a3ffe8e56687efb8e7688ebaf5a
|
/models/final_experiment_scripts/MIMIC/LoS/channel_wise_lstm.py
|
672a261444acf134a165a8bd320b316b08fb5d3f
|
[
"MIT"
] |
permissive
|
weikunzz/TPC-LoS-prediction
|
7bb9865e2f0fa3b461cb6fc23ed49996bfba59c1
|
30770f3e75d6a2a725c422b837f7ec864708f5d9
|
refs/heads/master
| 2023-04-06T10:19:12.284137 | 2021-04-08T14:06:53 | 2021-04-08T14:06:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 834 |
py
|
from eICU_preprocessing.split_train_test import create_folder
from models.run_lstm import BaselineLSTM
from models.initialise_arguments import initialise_lstm_arguments
from models.final_experiment_scripts.best_hyperparameters import best_cw_lstm
if __name__=='__main__':
    # Configure a channel-wise LSTM run on the MIMIC length-of-stay task,
    # using the tuned hyperparameters supplied by best_cw_lstm.
    c = initialise_lstm_arguments()
    c['exp_name'] = 'ChannelwiseLSTM'
    c['dataset'] = 'MIMIC'
    c = best_cw_lstm(c)
    # Per-experiment log directory (folder named after the experiment).
    log_folder_path = create_folder('models/experiments/final/MIMIC/LoS', c.exp_name)
    channelwise_lstm = BaselineLSTM(config=c,
                                    n_epochs=c.n_epochs,
                                    name=c.exp_name,
                                    base_dir=log_folder_path,
                                    explogger_kwargs={'folder_format': '%Y-%m-%d_%H%M%S{run_number}'})
    channelwise_lstm.run()
|
[
"[email protected]"
] | |
0d7102130db2739bb99c1c008e466724c33ed4b7
|
583d03a6337df9f1e28f4ef6208491cf5fb18136
|
/dev4qx/messagepush/task/subscribe.py
|
be4fdbc0be1cdda9998b0a83fc02a876b7637185
|
[] |
no_license
|
lescpsn/lescpsn
|
ece4362a328f009931c9e4980f150d93c4916b32
|
ef83523ea1618b7e543553edd480389741e54bc4
|
refs/heads/master
| 2020-04-03T14:02:06.590299 | 2018-11-01T03:00:17 | 2018-11-01T03:00:17 | 155,309,223 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,146 |
py
|
# -*- coding: utf-8 -*-
import logging
import tornado
from core.subscribe import subscrible_direct
request_log = logging.getLogger("ms.request")
class SubscribeTask(tornado.ioloop.PeriodicCallback):
    """Periodic task: pop a pending message type from Redis and dispatch it
    to the configured subscription handler."""

    def __init__(self, application, callback_time):
        """
        :param application: tornado application carrying config and a Redis sentinel.
        :param callback_time: period between runs (milliseconds).
        """
        super(SubscribeTask, self).__init__(self.run, callback_time)
        self.application = application
        # Redis master resolved through sentinel once; reused on every run.
        self.master = self.application.sentinel.master_for('madeira')

    @tornado.gen.coroutine
    def run(self):
        try:
            r = self.master
            # 'types' holds the set of message types with pending work.
            types = r.smembers('types')
            if types is None:
                self.finish('type空')
            else:
                msg_type = r.spop('types')
                func = self.application.config['subscrible'].get(msg_type)
                request_log.info('GET TASK_MESSAGE %s %s %s', types, msg_type, func)
                if func == 'direct':
                    yield subscrible_direct(self.application, msg_type)
        except Exception:
            # Was a bare `except:`, which would also swallow KeyboardInterrupt/
            # SystemExit inside the IOLoop; narrowed to Exception, still logged.
            request_log.exception('FAIL')
|
[
"[email protected]"
] | |
fbefcb0112cca43cc7b8a399c2dde0d4ca329f56
|
182c651a9b00b9b4d80e6d51ae574cb793958cd6
|
/widgets/stylesheet/stylesheet.py
|
f9c37d6886961ae308ad487c3780ee79e8573ba3
|
[] |
no_license
|
eudu/pyqt-examples
|
c61a7108e1fbfcf2cd918a0f99e9a5a90a3f305c
|
8e533b7b3c5e9bbe0617ef1ecb9b169dd216c181
|
refs/heads/master
| 2020-03-16T01:23:19.573347 | 2018-05-06T20:20:57 | 2018-05-06T20:20:57 | 132,438,940 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,546 |
py
|
#!/usr/bin/python3
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited
## Copyright (C) 2010 Hans-Peter Jansen <[email protected]>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
###########################################################################
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QMessageBox
import stylesheet_rc
from ui_mainwindow import Ui_MainWindow
from stylesheeteditor import StyleSheetEditor
class MainWindow(QMainWindow):
    """Main window of the Qt Style Sheet example.

    NOTE(review): the on_<objectName>_triggered methods appear to rely on
    setupUi's automatic slot connection (QMetaObject.connectSlotsByName) --
    do not rename them without checking.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # The 'mandatory' class property is targeted by the style sheets.
        self.ui.nameLabel.setProperty('class', 'mandatory QLabel')
        self.styleSheetEditor = StyleSheetEditor(self)
        self.statusBar().addWidget(QLabel("Ready"))
        self.ui.exitAction.triggered.connect(QApplication.instance().quit)
        self.ui.aboutQtAction.triggered.connect(QApplication.instance().aboutQt)

    def on_editStyleAction_triggered(self):
        # Show and focus the style sheet editor dialog.
        self.styleSheetEditor.show()
        self.styleSheetEditor.activateWindow()

    def on_aboutAction_triggered(self):
        # Show the About dialog describing the example.
        QMessageBox.about(self, "About Style sheet",
                "The <b>Style Sheet</b> example shows how widgets can be "
                "styled using "
                "<a href=\"http://doc.qt.digia.com/4.5/stylesheet.html\">Qt "
                "Style Sheets</a>. Click <b>File|Edit Style Sheet</b> to pop "
                "up the style editor, and either choose an existing style "
                "sheet or design your own.")
if __name__ == '__main__':
    # Standard Qt bootstrap: one QApplication, show the window, run the event loop.
    import sys
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
1f77afe28d0cb282cba9d56049db486e0e6d1c6f
|
d39bf3e0141f39752b40ca420ec7d90204ad4219
|
/tests/test_day_02.py
|
213be9a1bfceacdaa6696775d1b77d416bee4eb0
|
[] |
no_license
|
jasonbrackman/advent_of_code_2017
|
33260d98e1c348b8d249eabe425783568c3db494
|
a50e0cf9b628da96cb365744027d1a800557d1c9
|
refs/heads/master
| 2022-02-18T18:06:58.119383 | 2019-09-12T05:00:02 | 2019-09-12T05:00:02 | 112,784,403 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 957 |
py
|
"""
5 1 9 5
7 5 3
2 4 6 8
The first row's largest and smallest values are 9 and 1, and their difference is 8.
The second row's largest and smallest values are 7 and 3, and their difference is 4.
The third row's difference is 6.
In this example, the spreadsheet's checksum would be 8 + 4 + 6 = 18.
What is the checksum for the spreadsheet in your puzzle input?
"""
import pytest
from .. import day_02
@pytest.mark.parametrize('param, expect', [('5 1 9 5', 8),
                                           ('7 5 3', 4),
                                           ('2 4 6 8', 6)])
def test_min_max_diff(param, expect):
    # Part 1: each row's checksum contribution is max - min.
    assert day_02.min_max_dif(param) == expect


@pytest.mark.parametrize('param, expect', [('5 9 2 8', 4),
                                           ('9 4 7 3', 3),
                                           ('3 8 6 5', 2)])
def test_get_divisible_result(param, expect):
    # Part 2: the quotient of the one evenly-divisible pair in the row.
    assert day_02.get_divisible_result(param) == expect
|
[
"[email protected]"
] | |
9e8347f3ee2a079d974e2bdbee6c34880736fe6e
|
d8a9b88f4087ebfe97b462e589071222e2261e47
|
/520. Detect Capital.py
|
05ac6786a14cb0b3bec7c1c660096e885cf8269c
|
[] |
no_license
|
rohitpatwa/leetcode
|
a7a4e8a109ace53a38d613b5f898dd81d4771b1b
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
refs/heads/master
| 2021-07-07T12:40:30.424243 | 2021-03-31T00:21:30 | 2021-03-31T00:21:30 | 235,003,084 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
# Check if word is all upper or all lower. elif check if word is one capital and all lower. Else return False.
class Solution:
    def detectCapitalUse(self, word: str) -> bool:
        """Return True iff capitals are used legally in `word`:
        all caps, all lower case, or only the first letter capitalised."""
        uniform_case = word == word.upper() or word == word.lower()
        # word[:1] is safe on the empty string (unreachable there anyway,
        # since "" passes the uniform-case check).
        title_case = word[:1] == word[:1].upper() and word[1:] == word[1:].lower()
        return uniform_case or title_case
|
[
"[email protected]"
] | |
871132389561d6b5b48a9d5e7d876bc1654d5ee6
|
f2889a13368b59d8b82f7def1a31a6277b6518b7
|
/30.py
|
75414b4d6be012ed0fdb069967fc9cd91daa06d6
|
[] |
no_license
|
htl1126/leetcode
|
dacde03de5c9c967e527c4c3b29a4547154e11b3
|
c33559dc5e0bf6879bb3462ab65a9446a66d19f6
|
refs/heads/master
| 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,038 |
py
|
# ref: https://discuss.leetcode.com/topic/10665/concise-python-code-using
# -defaultdict
import collections
import copy
class Solution(object):
    def findSubstring(self, s, words):
        """
        Return the start indices in `s` of substrings that are a concatenation
        of every word in `words` exactly once (all words have equal length).

        Sliding window over each of the `wl` possible alignments, keeping a
        Counter of words still needed in the current window.

        Fix: `xrange` (Python-2-only) replaced by `range`, which works on
        both Python 2 and 3; behaviour is otherwise unchanged (including
        returning None, not [], for empty input).

        :type s: str
        :type words: List[str]
        :rtype: List[int]
        """
        if not words or not words[0]:
            return None
        wl, total, strlen, res = (len(words[0]), len(words) * len(words[0]),
                                  len(s), [])
        word_ctr = collections.Counter(words)
        for i in range(wl):
            j = i
            count = copy.copy(word_ctr)
            while j < strlen - wl + 1:
                count[s[j:j + wl]] -= 1
                # A word count went negative: shrink the window from the left
                # until every count is non-negative again.
                while count[s[j:j + wl]] < 0:
                    count[s[i:i + wl]] += 1
                    i += wl
                j += wl
                # Window holds exactly len(words) words: record its start.
                if j - i == total:
                    res += i,
        return res
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statement).
    sol = Solution()
    print sol.findSubstring('barfoothefoobarman', ['foo', 'bar'])
|
[
"[email protected]"
] | |
6c8ac1427f142513c13bd7794b07ab96a6f4c884
|
751cf52d62dba7d88387fc5734d6ee3954054fc2
|
/opencv/experiments_raw/contourExperiments/contourExperiment.py
|
25321959930231caf2a2607e82fe2c8687768cfe
|
[
"MIT"
] |
permissive
|
nooralight/lab-computer-vision
|
70a4d84a47a14dc8f5e9796ff6ccb59d4451ff27
|
0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9
|
refs/heads/master
| 2023-03-17T12:45:22.700237 | 2017-07-11T22:17:09 | 2017-07-11T22:17:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,364 |
py
|
"""
Display three views:
original frame
mask
resultant frame
whenever user clicks in original frame, color is specified
this color becomes the new mask color
The system then creates a contour around the largest object of that color on the screen, and a crosshair follows after that object
"""
import cv2
import numpy as np
color = np.array([0,0,0])
# CRAN = 20
# CRanArr = np.array([20, 10, 10])
# try (0, 50, 10)
def findHSV(bgr):
    """Return the HSV equivalent of a single BGR colour triple."""
    pixel = np.uint8([[bgr]])          # 1x1 image so cvtColor accepts it
    return cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV)
def drawXHair(img, y, x):
    """Draw a red crosshair (circle plus centred cross) at (x, y) on img."""
    colour = (0, 0, 255)               # BGR red
    radius = 20
    thickness = 2
    cv2.circle(img, (int(x), int(y)), 20, colour, thickness)
    cv2.line(img, (x - radius, y), (x + radius, y), colour, thickness)
    cv2.line(img, (x, y - radius), (x, y + radius), colour, thickness)
def colorSelect(event, x, y, flags, param):
    """Mouse callback: on left-button release, sample the pixel under the
    cursor from the global `frame` and store its HSV value in the global
    `color` used for masking."""
    global color
    if event == cv2.EVENT_LBUTTONUP:
        color_rgb = frame[y, x, 0:3]  # BGR pixel despite the variable name
        color = findHSV(color_rgb)
        print(color)
def doNothing(x):
    """No-op callback; cv2.createTrackbar requires one."""
    return None
cap = cv2.VideoCapture(0)
cv2.namedWindow('frame')
cv2.setMouseCallback('frame', colorSelect)
cv2.namedWindow('trackbars')
cv2.createTrackbar('H', 'trackbars', 0, 50, doNothing)
cv2.createTrackbar('S', 'trackbars', 50, 50, doNothing)
cv2.createTrackbar('V', 'trackbars', 10, 50, doNothing)
while(1):
dh = cv2.getTrackbarPos('H', 'trackbars')
ds = cv2.getTrackbarPos('S', 'trackbars')
dv = cv2.getTrackbarPos('V', 'trackbars')
CRanArr = np.array([dh, ds, dv])
# take each frame
_, frame = cap.read()
print(np.shape(frame))
# convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
# lower_color = color + np.array([-CRAN, -CRAN, -CRAN])
# upper_color = color + np.array([CRAN, CRAN, CRAN])
lower_color = color - CRanArr
upper_color = color + CRanArr
# print lower_color , '|' , upper_color
# threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_color, upper_color)
# Noise removal experimentation
kernel = np.ones((20,20), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# mask = cv2.erode(mask, kernel, iterations = 1)
# mask = cv2.dilate(mask, kernel, iterations=5)
ret, thresh = cv2.threshold(mask, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(mask, contours, -1, 150, 3)
area = 0
largest_contour = 0
for i in xrange(len(contours)):
if cv2.contourArea(contours[i])>area:
largest_contour = i
cv2.drawContours(mask, contours, largest_contour, 150, 3)
print len(contours)
if len(contours)>0:
M = cv2.moments(contours[largest_contour])
if M['m00']>0:
cx = int(M['m10']/(M['m00']))
cy = int(M['m01']/(M['m00']))
print cx ,'|', cy
drawXHair(frame, cy, cx)
print(color)
# bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask= mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
8dc31ef220e3a12803bb906e33892e6ea9a93a18
|
b00873d36e44128ce30623da0ee3b556e4e3d7e7
|
/solutions/solution725.py
|
534be36c9402b8115b72bfe0c67a417dff55304b
|
[
"MIT"
] |
permissive
|
Satily/leetcode_python_solution
|
b4aadfd1998877b5086b5423c670750bb422b2c8
|
3f05fff7758d650469862bc28df9e4aa7b1d3203
|
refs/heads/master
| 2021-07-18T07:53:10.387182 | 2021-07-17T06:30:09 | 2021-07-17T06:30:09 | 155,074,789 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
from data_structure import ListNode, build_link_list, flatten_link_list
class Solution:
    """LeetCode 725: split a linked list into k consecutive parts whose
    sizes differ by at most one, longer parts first."""

    def splitListToParts(self, root, k):
        """
        :type root: ListNode
        :type k: int
        :rtype: List[ListNode]
        """
        # Measure the list once.
        length = 0
        node = root
        while node is not None:
            node = node.next
            length += 1
        base, extra = divmod(length, k)
        parts = []
        for i in range(k):
            size = base + (1 if i < extra else 0)
            head = root
            if root is not None:
                # Walk to the last node of this part, then detach it.
                tail = root
                for _ in range(size - 1):
                    tail = tail.next
                root, tail.next = tail.next, None
            parts.append(head)
        return parts
if __name__ == "__main__":
inputs = [
# ([1, 2, 3], 5),
([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3),
]
for root_list, k in inputs:
root = build_link_list(root_list)
result = [flatten_link_list(head) for head in Solution().splitListToParts(root, k)]
print(result)
|
[
"[email protected]"
] | |
c3784b117e770c6c6948e80849e5bd8cf0457254
|
7727187a009e4b9c46c2fe06609372ec8814cd23
|
/test/test_augment_data.py
|
d83bec586121132d679dc61a95c78929cece6eea
|
[] |
no_license
|
govtmirror/freemix-akara
|
ebf204554f4effc0543e60083698f2ea012413b8
|
1d10c3f02afbd4268852e2c52afdf77809176bdd
|
refs/heads/master
| 2021-01-12T07:47:08.183429 | 2014-06-05T18:53:56 | 2014-06-05T18:53:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 809 |
py
|
import os
import urllib, urllib2
from urllib2 import urlopen
from freemix_akara import __version__
from server_support import server
RESOURCE_DIR = os.path.join(os.path.dirname(__file__), "resource")
def test_augment():
    """POST the sample augment payload and expect an 'items' key back."""
    import simplejson
    endpoint = server() + "augment.freemix.json"
    request = urllib2.Request(endpoint)
    payload = open(os.path.join(RESOURCE_DIR, "augment", "augment_test1.js")).read()
    reply = urllib2.urlopen(request, payload)
    parsed = simplejson.load(reply)
    assert "items" in parsed
def test_mix():
    """POST the sample mix payload and expect an 'items' key back."""
    import simplejson
    endpoint = server() + "mix.freemix.json"
    request = urllib2.Request(endpoint)
    payload = open(os.path.join(RESOURCE_DIR, "mix", "mix.js")).read()
    reply = urllib2.urlopen(request, payload)
    parsed = simplejson.load(reply)
    assert "items" in parsed
|
[
"[email protected]"
] | |
f1f4be0600c0a96312d2b00339681c2c5efff41b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_cackles.py
|
b56545b1c6dceb5e279e87bc0ba44c4f57263de2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
#calss header
class _CACKLES():
def __init__(self,):
self.name = "CACKLES"
self.definitions = cackle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cackle']
|
[
"[email protected]"
] | |
0f7ddbc55809f101e6c51e745fc682ec6439b74a
|
edbf8601ae771031ad8ab27b19c2bf450ca7df76
|
/283-Move-Zeroes/MoveZeroes.py
|
b7e24b42230eae378975aceeeb96569feb6628fa
|
[] |
no_license
|
gxwangdi/Leetcode
|
ec619fba272a29ebf8b8c7f0038aefd747ccf44a
|
29c4c703d18c6ff2e16b9f912210399be427c1e8
|
refs/heads/master
| 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 |
Java
|
UTF-8
|
Python
| false | false | 548 |
py
|
class Solution(object):
    """LeetCode 283: move all zeroes to the end, preserving the relative
    order of the non-zero elements, in place."""

    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        if not nums or len(nums) < 2:
            return
        # Compact the non-zero values to the front...
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # ...then zero-fill the tail.
        for idx in range(write, len(nums)):
            nums[idx] = 0
|
[
"[email protected]"
] | |
f8ba0392696152c9b0153c42e7340ebb511a2e0a
|
32bfc07c9661b0820e525158ef9a03c1d3256ecd
|
/Week 2/mysite-link1/django-polls/polls/migrations/0001_initial.py
|
8f55db1363fc94de11712f49c0f9b7f97cca9bdc
|
[] |
no_license
|
Aktoty00/BFDjango
|
c4d42d0f8d11a14813dbf2d67830531193b81417
|
95e28e9c56b1c1a3a286a1919b942512efdd585a
|
refs/heads/master
| 2021-09-25T15:35:16.722971 | 2020-04-19T11:43:27 | 2020-04-19T11:43:27 | 234,919,812 | 0 | 0 | null | 2021-09-22T18:39:00 | 2020-01-19T15:16:34 |
Python
|
UTF-8
|
Python
| false | false | 1,178 |
py
|
# Generated by Django 2.1.7 on 2020-01-22 15:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial polls schema: creates Question and Choice, then links
    Choice.question as a cascading foreign key."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            # NOTE(review): the target label 'django-polls.polls.Question' is
            # unusual (app labels normally contain no dots or dashes) —
            # verify it matches the installed app configuration.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django-polls.polls.Question'),
        ),
    ]
|
[
"[email protected]"
] | |
a789ad6f90b611c1ab8c53baa204e144607c2690
|
e7dfccc8136776443461b6580752c7f0f50556b3
|
/matrix_webhook/__main__.py
|
18d4fccae9584210927760e0ca5fa6e165449fa1
|
[
"BSD-2-Clause"
] |
permissive
|
nim65s/matrix-webhook
|
f223e404922860dfae711b3017664b976fd9d4e2
|
ad74f632c630a748577ba201c5e89dfa02eece4d
|
refs/heads/master
| 2023-09-01T01:02:28.097429 | 2023-08-01T11:09:14 | 2023-08-01T11:09:14 | 171,114,171 | 97 | 32 |
NOASSERTION
| 2023-09-06T13:53:04 | 2019-02-17T11:29:31 |
Python
|
UTF-8
|
Python
| false | false | 334 |
py
|
"""Matrix Webhook module entrypoint."""
import logging
from . import app, conf
def main():
    """Configure logging verbosity from conf.VERBOSE and start the app."""
    logging.basicConfig(
        level=50 - 10 * conf.VERBOSE,
        format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
    )
    app.run()


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
15c83f62c9fd56c469799186fc20478de46552d4
|
054eefaa17157b32869ea986347b3e539d2bf06b
|
/big_o_coding/Blue_13/Homework/day_12_eko_spoj.py
|
23dcd5c8db290cfe538fb92b5da5ca59e51c778e
|
[] |
no_license
|
baocogn/self-learning
|
f2cb2f45f05575b6d195fc3c407daf4edcfe7d0e
|
f50a3946966354c793cac6b28d09cb5dba2ec57a
|
refs/heads/master
| 2021-07-12T23:32:14.728163 | 2019-02-10T14:24:46 | 2019-02-10T14:24:46 | 143,170,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
import sys
input = sys.stdin.readline
# SPOJ EKO: find the highest sawblade setting that still yields at least
# the required amount of wood, via binary search on the blade height.
tree_count, needed = map(int, input().split())
trees = list(map(int, input().split()))


def total_cut(level):
    """Total wood obtained with the blade set at `level`."""
    return sum(h - level for h in trees if h > level)


lo, hi = 0, max(trees)
best = 0
while lo <= hi:
    mid = (lo + hi) // 2
    if total_cut(mid) >= needed:
        best = mid          # feasible — try a higher setting
        lo = mid + 1
    else:
        hi = mid - 1
print(best)
|
[
"[email protected]"
] | |
064bb76c7c62f304ae205b982893d13f9243fac9
|
1c4110a0bdbb888fd7a82579810cda2c73b52dba
|
/20210715 Pycharm/Pycharm/venv/Lib/site-packages/bamboo/common/colours.py
|
389df001c9cd8b21e7310bebdda8bb08960fbeee
|
[] |
no_license
|
DrillND/python
|
d09786e2937a10c9c67170826131b8ee204e0b37
|
f6aa1d4d29e4519f89a63af4c3c8f83ed60630ea
|
refs/heads/main
| 2023-06-19T11:51:14.307597 | 2021-07-16T07:18:52 | 2021-07-16T07:18:52 | 355,095,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 205 |
py
|
class bcolours:
    """ANSI escape sequences for coloured / emphasised terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
|
[
"[email protected]"
] | |
a44a8301d9cf018c0b5ff5bc64748a1262eda343
|
b9eb496c4551fd091954675a61382636fc68e715
|
/src/ABC1xx/ABC14x/ABC140/ABC140B.py
|
8357fa46e4c56c5d78f10b2adcc2a1f6074cfb70
|
[] |
no_license
|
kttaroha/AtCoder
|
af4c5783d89a61bc6a40f59be5e0992980cc8467
|
dc65ce640954da8c2ad0d1b97580da50fba98a55
|
refs/heads/master
| 2021-04-17T16:52:09.508706 | 2020-11-22T05:45:08 | 2020-11-22T05:45:08 | 249,460,649 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 339 |
py
|
def main():
    """AtCoder ABC140 B: total satisfaction from eating dishes in order,
    with a bonus when dish i+1 is eaten immediately after dish i."""
    _ = int(input())                 # N is implied by the list lengths
    order = list(map(int, input().split()))
    satisfaction = list(map(int, input().split()))
    bonus = list(map(int, input().split()))
    total = 0
    previous = -100                  # sentinel: never adjacent to dish 1
    for dish in order:
        total += satisfaction[dish - 1]
        if dish == previous + 1:     # eaten right after its predecessor
            total += bonus[previous - 1]
        previous = dish
    print(total)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
2b67e235a3490fd768faa695ff32d76ed01f6f61
|
a6bd25c3508d45134436bc3a39345e2565debec0
|
/Assignment1/urls.py
|
1fec19327932a4d972c8807b1a1ec09c09df8b86
|
[] |
no_license
|
gitNikhilsahu/Django-Business-Employee-Management
|
2a869dbf9c0aac078662b09db708b7c03b372c5c
|
e1c6d1588561abf193d70ca4cb91c912c3ea66d1
|
refs/heads/master
| 2022-12-17T07:58:25.655611 | 2020-09-25T08:43:18 | 2020-09-25T08:43:18 | 298,517,130 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 478 |
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# URL routing: business app at the site root, employee app under /employee/.
urlpatterns = [
    path('', include('business.urls')),
    path('employee/', include('employee.urls')),
    path('admin/', admin.site.urls),
]
# Serve static assets and uploaded media through Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
bf63fe697c539ec382672dc75ea18cf93dae240b
|
71d4cc88c68f957a37a2db8234f8178ad2c1c769
|
/graphgallery/data/npz_dataset.py
|
8e769181796d82f0fa694a5ba370dd41a5b82c3e
|
[
"MIT"
] |
permissive
|
MLDL/GraphGallery
|
3159e0b8ddb1d2fa6b7cea4a27ba075f97db0a03
|
2474622286f135ca693c62981f5a4c4b31bcd2e6
|
refs/heads/master
| 2022-12-28T03:03:48.516408 | 2020-09-26T16:08:05 | 2020-09-26T16:08:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,080 |
py
|
import os
import zipfile
import os.path as osp
import numpy as np
from graphgallery.data import Dataset
from graphgallery.data.io import makedirs, files_exist, download_file
from graphgallery.data.graph import Graph, load_dataset
_DATASETS = ('citeseer', 'cora', 'cora_ml', 'cora_full', 'amazon_cs', 'amazon_photo',
'coauthor_cs', 'coauthor_phy', 'polblogs', 'pubmed', 'flickr', 'flickr_sdm', 'blogcatalog')
class NPZDataset(Dataset):
    """Graph dataset stored as a single .npz file, downloaded on demand
    from the GraphData GitHub mirror (or a custom `url`)."""
    github_url = "https://raw.githubusercontent.com/EdisonLeeeee/GraphData/master/datasets/npz/{}.npz"
    supported_datasets = _DATASETS
    def __init__(self, name, root=None, url=None, standardize=False, verbose=True):
        # Unknown names are allowed and treated as custom datasets.
        if not name.lower() in self.supported_datasets:
            print(f"Dataset not Found. Using custom dataset: {name}.\n")
        # NOTE(review): self.root / self.verbose are set by the base class —
        # confirm against graphgallery.data.Dataset.
        super().__init__(name, root, verbose)
        self._url = url
        self.download_dir = osp.join(self.root, "npz")
        self.standardize = standardize
        makedirs(self.download_dir)
        self.download()
        self.process()
    def download(self):
        """Fetch the .npz file unless it is already present on disk."""
        if files_exist(self.raw_paths):
            print(f"Downloaded dataset files have existed.")
            if self.verbose:
                self.print_files(self.raw_paths)
            return
        self.print_files(self.raw_paths)
        print("Downloading...")
        # self.urls presumably comes from the base class; verify.
        download_file(self.raw_paths, self.urls)
        if self.verbose:
            self.print_files(self.raw_paths)
        print("Downloading completed.")
    def process(self):
        """Load the graph, drop self-loops, make it undirected, and
        optionally standardize it; result is stored on self.graph."""
        print("Processing...")
        graph = load_dataset(
            self.raw_paths[0]).eliminate_selfloops().to_undirected()
        if self.standardize:
            graph = graph.standardize()
        self.graph = graph
        print("Processing completed.")
    @property
    def url(self):
        """Download URL: the user-supplied one, else the GitHub mirror."""
        if isinstance(self._url, str):
            return self._url
        else:
            return self.github_url.format(self.name)
    @property
    def raw_paths(self):
        """Local path(s) of the raw .npz file, as a one-element list."""
        return [f"{osp.join(self.download_dir, self.name)}.npz"]
|
[
"[email protected]"
] | |
59c65295bbf233c1466985d1aa33bafac20aa3fe
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/1152.py
|
a5a2f7fcec24d2ae43109115e3074698189fdd34
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,234 |
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from string import ascii_lowercase
from pprint import pprint
import sys, os
sample_googlerese = """ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv
"""
sample_answer = """our language is impossible to understand
there are twenty six factorial possibilities
so it is okay if you want to just give up
"""
# char_map maps each googlerese character to its plain-text counterpart.
char_map = dict()
for c in ascii_lowercase:
    char_map[c] = ""
# Pair known from the problem statement but absent from the sample text.
char_map['q'] = 'z'
char_map[' '] = ' '
def make_char_mapping():
    """Fill char_map from the sample pair, then map the single remaining
    unused letter to 'z' (the only letter with no known preimage)."""
    for a,g in zip(sample_answer, sample_googlerese):
        if g in ascii_lowercase:
            char_map[g] = a
    for c in ascii_lowercase:
        if not c in char_map.values():
            char_map['z'] = c
def decode(input_str):
    """Translate one googlerese line via char_map, dropping the newline."""
    output = list()
    for c in input_str:
        if not c == '\n':
            output.append(char_map[c])
    return ''.join(output)
if __name__ == "__main__":
make_char_mapping()
filename = sys.argv[1]
template = "Case #%d: %s"
with open(filename) as r:
casenum = int(r.readline())
for i in xrange(casenum):
input_str = r.readline()
print template % (i + 1, decode(input_str))
|
[
"[email protected]"
] | |
77dcd58897fa39cc6326e1fc2178a0adc30ff87b
|
cbdef2e8ed259adc4653ade34db12d8bcc0cea9f
|
/dominion/cards/Wizard_Student.py
|
fa8a42e3234ec5e594f4503326b3c3dd61788893
|
[] |
no_license
|
dwagon/pydominion
|
8dd5afef8ec89c63ade74c4ae6c7473cd676799f
|
545709f0a41529de74f33aa83b106c456900fa5b
|
refs/heads/main
| 2023-08-29T10:02:26.652032 | 2023-08-23T02:25:00 | 2023-08-23T02:25:00 | 18,776,204 | 1 | 0 | null | 2023-08-23T02:25:02 | 2014-04-14T20:49:28 |
Python
|
UTF-8
|
Python
| false | false | 3,568 |
py
|
#!/usr/bin/env python
import unittest
from dominion import Game, Card, Piles
###############################################################################
class Card_Student(Card.Card):
    """Allies Wizard 'Student': +1 Action, may rotate the Wizards pile,
    then trashes a card; a trashed Treasure gives +1 Favor and top-decks
    this card."""
    def __init__(self):
        Card.Card.__init__(self)
        self.cardtype = [
            Card.CardType.ACTION,
            Card.CardType.WIZARD,  # pylint: disable=no-member
            Card.CardType.LIAISON,
        ]
        self.base = Card.CardExpansion.ALLIES
        self.cost = 3
        self.name = "Student"
        self.actions = 1
        self.desc = """+1 Action;
        You may rotate the Wizards;
        Trash a card from your hand. If it's a Treasure, +1 Favor and put this onto your deck."""
    def special(self, game, player):
        """Resolve the optional rotation, then the forced trash and its
        Treasure bonus."""
        opt = player.plr_choose_options(
            "Do you want to rotate the Wizards?",
            ("Don't change", False),
            ("Rotate", True),
        )
        if opt:
            game["Wizards"].rotate()
        trashed = player.plr_trash_card(
            prompt="Pick a card to trash", num=1, force=True
        )
        if trashed and trashed[0].isTreasure():
            player.favors.add(1)
            # Move Student from the played area onto the top of the deck.
            player.piles[Piles.PLAYED].remove(self)
            player.add_card(self, "deck")
###############################################################################
class TestStudent(unittest.TestCase):
    """Unit tests for the Student card's trash/rotate/favor behaviour."""
    def setUp(self):
        self.g = Game.TestGame(numplayers=1, initcards=["Wizards"], use_liaisons=True)
        self.g.start_game()
        self.plr = self.g.player_list()[0]
    def test_play_trash_treas(self):
        """Play a student - don't rotate, but trash treasure"""
        # Draw from the Wizards pile until Student surfaces.
        while True:
            card = self.g["Wizards"].remove()
            if card.name == "Student":
                break
        self.plr.piles[Piles.HAND].set("Copper", "Silver", "Gold", "Estate")
        self.plr.add_card(card, Piles.HAND)
        self.plr.test_input = ["Don't change", "Trash Copper"]
        favors = self.plr.favors.get()
        self.plr.play_card(card)
        # Treasure trashed: +1 favor and Student goes onto the deck.
        self.assertIn("Copper", self.g.trashpile)
        self.assertIn("Student", self.plr.piles[Piles.DECK])
        self.assertEqual(self.plr.favors.get(), favors + 1)
    def test_play_trash_non_treas(self):
        """Play a student - don't rotate, but trash a non treasure"""
        while True:
            card = self.g["Wizards"].remove()
            if card.name == "Student":
                break
        self.plr.piles[Piles.HAND].set("Copper", "Silver", "Gold", "Estate")
        self.plr.add_card(card, Piles.HAND)
        self.plr.test_input = ["Don't change", "Trash Estate"]
        favors = self.plr.favors.get()
        self.plr.play_card(card)
        # Non-treasure trashed: no favor, Student stays off the deck.
        self.assertIn("Estate", self.g.trashpile)
        self.assertNotIn("Student", self.plr.piles[Piles.DECK])
        self.assertEqual(self.plr.favors.get(), favors)
    def test_play_trash_rotate(self):
        """Play a student - rotate, and trash a non treasure"""
        while True:
            card = self.g["Wizards"].remove()
            if card.name == "Student":
                break
        self.plr.piles[Piles.HAND].set("Copper", "Silver", "Gold", "Estate")
        self.plr.add_card(card, Piles.HAND)
        self.plr.test_input = ["Rotate", "Trash Estate"]
        self.plr.play_card(card)
        # After rotation the next Wizard drawn should be Conjurer.
        card = self.g["Wizards"].remove()
        self.assertEqual(card.name, "Conjurer")
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
[
"[email protected]"
] | |
01f32c1f857b3e6cb6206443d4778d3411fa38fa
|
85de10a9467b3cd88ce83227bee0d71706e2c2b0
|
/c15/point1.py
|
bcd93dd9a81cdd42b3999ae7c53212cba3aa9078
|
[] |
no_license
|
sreejithev/thinkpythonsolutions
|
f0bbfc0951e57e9b81f50aabf968860484081524
|
59481fd3d2976e73691a3fff97e083c336070cea
|
refs/heads/master
| 2019-07-22T14:08:54.890004 | 2017-09-15T05:06:26 | 2017-09-15T05:06:26 | 94,759,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,480 |
py
|
"""
Code example from Think Python, by Allen B. Downey.
Available from http://thinkpython.com
Copyright 2012 Allen B. Downey.
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
class Point(object):
    """Represents a point in 2-D space (attributes: x, y)."""
def print_point(p):
    """Print a Point object in human-readable format (Python 2 print)."""
    print '(%g, %g)' % (p.x, p.y)
class Rectangle(object):
    """Represents a rectangle.
    attributes: width, height, corner.
    """
def find_center(rect):
    """Returns a Point at the center of a Rectangle.
    NOTE(review): assumes `corner` is the lower-left corner — confirm
    against callers.
    """
    p = Point()
    p.x = rect.corner.x + rect.width/2.0
    p.y = rect.corner.y + rect.height/2.0
    return p
def grow_rectangle(rect, dwidth, dheight):
    """Modify the Rectangle in place by adding to its width and height.
    rect: Rectangle object.
    dwidth: change in width (can be negative).
    dheight: change in height (can be negative).
    """
    rect.width += dwidth
    rect.height += dheight
def main():
    # Demo: build a Point and a Rectangle, then exercise the helpers.
    # (Python 2 print statements throughout.)
    blank = Point()
    blank.x = 3
    blank.y = 4
    print 'blank',
    print_point(blank)
    box = Rectangle()
    box.width = 100.0
    box.height = 200.0
    box.corner = Point()
    box.corner.x = 0.0
    box.corner.y = 0.0
    center = find_center(box)
    print 'center',
    print_point(center)
    print box.width
    print box.height
    print 'grow'
    grow_rectangle(box, 50, 100)
    print box.width
    print box.height
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
d7a3715564bf502e8f7675515f39437fd16aea6d
|
1adc05008f0caa9a81cc4fc3a737fcbcebb68995
|
/hardhat/recipes/libsecret.py
|
b42a31342843ec94f44a8536408092b7348707ab
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
stangelandcl/hardhat
|
4aa995518697d19b179c64751108963fa656cfca
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
refs/heads/master
| 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 672 |
py
|
from .base import GnuRecipe
class LibSecretRecipe(GnuRecipe):
    """Build recipe for GNOME libsecret 0.18.5."""
    def __init__(self, *args, **kwargs):
        super(LibSecretRecipe, self).__init__(*args, **kwargs)
        self.name = 'libsecret'
        self.version = '0.18.5'
        self.version_regex = r'(?P<version>\d+\.\d+(\.\d+)?)'
        self.sha256 = ('9ce7bd8dd5831f2786c935d82638ac42'
                       '8fa085057cc6780aba0e39375887ccb3')
        self.depends = ['gcrypt', 'glib', 'gobject-introspection', 'vala']
        self.url = ('http://ftp.gnome.org/pub/gnome/sources/libsecret/'
                    '$short_version/libsecret-$version.tar.xz')
        # Vala bindings are disabled on purpose.
        self.configure_args += ['--enable-vala=no']
|
[
"[email protected]"
] | |
da3f14eb4676c866d47a2784491765e6f5abcac8
|
0bbeb0bbe788ec5a8ba15acf159e4b913985bba4
|
/tests/testsuite/a_basic/tests_03_networking.py
|
5173cf96368f92efba523e5f790107970eeb035a
|
[
"Apache-2.0"
] |
permissive
|
GlenDC/0-core
|
629bd9836ab4ff2fe0c40628419b58205bb64648
|
807fa1939199fa3aa3b3e57679f61bb6c72cc57f
|
refs/heads/master
| 2021-06-17T19:52:40.405225 | 2017-06-14T16:42:39 | 2017-06-14T16:42:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,190 |
py
|
from utils.utils import BaseTest
import time
import unittest
class BasicNetworking(BaseTest):
    """Zero-OS client tests: zerotier membership and bridge lifecycle."""
    def setUp(self):
        super(BasicNetworking, self).setUp()
        self.check_g8os_connection(BasicNetworking)
    def test001_join_leave_list_zerotier(self):
        """ g8os-012
        *Test case for testing joining, listing, leaving zerotier networks*
        **Test Scenario:**
        #. Get NetworkId using zerotier API
        #. Join zerotier network (N1), should succeed
        #. List zerotier network
        #. Join fake zerotier network (N1), should fail
        #. Leave zerotier network (N1), should succeed
        #. List zerotier networks, N1 should be gone
        #. Leave zerotier network (N1), should fail
        """
        self.lg('{} STARTED'.format(self._testID))
        self.lg('Get NetworkId using zerotier API')
        networkId = self.getZtNetworkID()
        self.lg('Join zerotier network (N1), should succeed')
        self.client.zerotier.join(networkId)
        self.lg('List zerotier network')
        r = self.client.zerotier.list()
        self.assertIn(networkId, [x['nwid'] for x in r])
        self.lg('Join fake zerotier network (N1), should fail')
        with self.assertRaises(RuntimeError):
            self.client.zerotier.join(self.rand_str())
        self.lg('Leave zerotier network (N1), should succeed')
        self.client.zerotier.leave(networkId)
        self.lg('List zerotier networks, N1 should be gone')
        r = self.client.zerotier.list()
        self.assertNotIn(networkId, [x['nwid'] for x in r])
        # Leaving a network we are no longer part of must raise.
        self.lg('Leave zerotier network (N1), should fail')
        with self.assertRaises(RuntimeError):
            self.client.zerotier.leave(networkId)
        self.lg('{} ENDED'.format(self._testID))
    def test002_create_delete_list_bridges(self):
        """ g8os-013
        *Test case for testing creating, listing, deleting bridges*
        **Test Scenario:**
        #. Create bridge (B1), should succeed
        #. List bridges, B1 should be listed
        #. Create bridge with same name of (B1), should fail
        #. Delete bridge B1, should succeed
        #. List bridges, B1 should be gone
        #. Delete bridge B1, should fail
        """
        self.lg('{} STARTED'.format(self._testID))
        self.lg('Create bridge (B1), should succeed')
        bridge_name = self.rand_str()
        self.client.bridge.create(bridge_name)
        self.lg('List bridges, B1 should be listed')
        response = self.client.bridge.list()
        self.assertIn(bridge_name, response)
        # Bridge names must be unique.
        self.lg('Create bridge with same name of (B1), should fail')
        with self.assertRaises(RuntimeError):
            self.client.bridge.create(bridge_name)
        self.lg('Delete bridge B1, should succeed')
        self.client.bridge.delete(bridge_name)
        self.lg('List bridges, B1 should be gone')
        response = self.client.bridge.list()
        self.assertNotIn(bridge_name, response)
        # Deleting an already-deleted bridge must raise.
        self.lg('Delete bridge B1, should fail')
        with self.assertRaises(RuntimeError):
            self.client.bridge.delete(bridge_name)
        self.lg('{} ENDED'.format(self._testID))
|
[
"[email protected]"
] | |
6b3e10704b67a05bbd5fc73fe408618d870f0728
|
262311e60529868e38c2c57ee3db573f8e11c458
|
/qa-automated/runner.py
|
c841c2e6d2e393b0fa9c3ef97393f624bae447f1
|
[] |
no_license
|
huileizhan227/untitled
|
1c5604736d9ffcce6f7cb7e308cdc0ebd07e116a
|
07df74c89291b1664a28e3c8dcba51a917f1835f
|
refs/heads/master
| 2023-01-27T11:51:37.609210 | 2020-04-16T11:49:59 | 2020-04-16T11:49:59 | 150,606,504 | 1 | 0 | null | 2023-01-09T12:00:12 | 2018-09-27T15:12:18 |
HTML
|
UTF-8
|
Python
| false | false | 2,700 |
py
|
import os
import sys
import time
import qasite
import pytest
import config
from multiprocessing import Pool
from performance import Report as Perf
from common import devicectl
from common import serverctl
from common import utils
def run(project_name=None, build_id=None, test_name_filter=None):
    """Full test pass: start the backing servers, run the per-device case
    suites in parallel, stop the servers, and (for CI runs) upload the
    reports to qasite."""
    # Log folder: timestamped for local runs, project/build for CI runs.
    if (not project_name) or (not build_id):
        log_folder = os.path.join(config.LOG_FOLDER, utils.get_formated_time())
    else:
        log_folder = os.path.join(config.LOG_FOLDER, project_name, str(build_id))
    # Start servers and reset the devices to a clean state.
    serverctl.run_servers(log_folder=log_folder)
    devicectl.uninstall_apk()
    devicectl.uninstall_ua2()
    devicectl.wakeup()
    # Run cases: one pool worker per configured device.
    devices = config.devices
    # case_process_list = []
    args_list = []
    for device in devices:
        report_folder = os.path.join(log_folder, device['name'])
        if not os.path.exists(report_folder):
            os.makedirs(report_folder)
        perf_log = os.path.join(report_folder, 'performance.csv')
        perf_report = os.path.join(report_folder, 'performance.html')
        ui_report = os.path.join(report_folder, 'report.html')
        device['perf_report'] = perf_report
        device['ui_report'] = ui_report
        args=(perf_log, perf_report, ui_report, device['id'], test_name_filter)
        args_list.append(args)
    pool = Pool(len(args_list))
    pool.starmap(run_cases, args_list)
    pool.close()
    pool.join()
    # Stop servers.
    print('run cases over, killing servers...')
    serverctl.stop_servers()
    # Upload reports.
    # TODO: only the first device's reports are uploaded for now; uploading
    # multiple reports needs support on the qasite side.
    if (project_name is not None) and (build_id is not None):
        print('uploading aotomated testing report...')
        if not qasite.upload_report(devices[0]['ui_report'], 0, project_name, build_id):
            print('upload failed')
        print('uploading performance testing report...')
        if not qasite.upload_report(devices[0]['perf_report'], 1, project_name, build_id):
            print('upload failed')
    print('test finished.')
def run_cases(perf_log, perf_report, ui_report, device_id, test_name_filter):
    """Run the app case suite under pytest for one device, writing the UI
    and performance reports to the given paths."""
    pytest_args = [
        'cases/app',
        '--html={}'.format(ui_report),
        '--self-contained-html',
        '--device-id={}'.format(device_id),
        '--perf-log={}'.format(perf_log),
        '--perf-report={}'.format(perf_report)
    ]
    if test_name_filter:
        # Narrow the run to tests matching the -k expression.
        pytest_args += ['-k', test_name_filter]
    pytest.main(pytest_args)
if __name__ == "__main__":
test_name_filter = None
if len(sys.argv) > 1:
test_name_filter = sys.argv[1]
run(test_name_filter=test_name_filter)
|
[
"[email protected]"
] | |
4926ffe92721d5b449773c2caff35eabfbef1e6a
|
b410490f4249b4075eab92e3a16000a8b839e18c
|
/object_detection/YOLOv3/dataset.py
|
835b5b26d48edf15af90e53cc530340dfc619848
|
[] |
no_license
|
TaeYeon-kim-ai/Pytorch
|
5936145643a2b36b5c52e43f735bda81264ed6d5
|
452e5543a959f2b280b088635953985e1101041d
|
refs/heads/master
| 2023-07-01T17:29:30.558774 | 2021-08-12T19:01:36 | 2021-08-12T19:01:36 | 387,499,162 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,282 |
py
|
#import config
import numpy as np
import os
import pandas as pd
import torch
from PIL import Image, ImageFile
from torch.utils.data import Dataset, DataLoader
from utils import (
iou_width_height as iou,
non_max_suppression_as_nms,
)
ImageFile.LOAD_TRUNCATED_IMAGES = True
class YOLODataset(Dataset):
    """Dataset that loads (image, label) pairs listed in a CSV and builds
    YOLOv3 multi-scale training targets.

    Each target tensor has shape (anchors_per_scale, S, S, 6) with layout
    [objectness, x, y, w, h, class]; cells covered by a non-best anchor
    whose IoU exceeds `ignore_iou_thresh` are marked -1 (ignored in loss).
    """
    def __init__(
        self,
        csv_file,
        img_dir, label_dir,
        anchors,
        image_size = 416,
        S = [13, 26, 52],
        C = 20,
        transform = None,
    ):
        self.annotations = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.transform = transform
        self.S = S
        # FIX: the original line was the bare expression `self.anchors`,
        # so the attribute was never assigned.  All three scales' anchors
        # are flattened into one (9, 2) tensor.
        self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2])
        self.num_anchors = self.anchors.shape[0]
        self.num_anchors_per_scale = self.num_anchors // 3
        self.C = C
        self.ignore_iou_thresh = 0.5
    def __len__(self):
        # FIX: was `self.annotation` (AttributeError).
        return len(self.annotations)
    def __getitem__(self, index) :
        label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
        # np.roll moves class_label from column 0 to the end -> [x, y, w, h, class].
        # FIX: `ndmim` -> `ndmin` (np.loadtxt would reject the typo).
        bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
        img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
        image = np.array(Image.open(img_path).convert("RGB"))
        if self.transform :
            # FIX: keyword was misspelled `iamge`; albumentations-style
            # transforms expect `image=`.
            augmentation = self.transform(image=image, bboxes=bboxes)
            image = augmentation["image"]
            bboxes = augmentation["bboxes"]
        targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S] # [p_o, x, y, w, h, c]
        for box in bboxes :
            # Rank anchors by IoU with this box's (w, h).
            iou_anchors = iou(torch.Tensor(box[2:4]), self.anchors)
            anchor_indices = iou_anchors.argsort(descending = True, dim = 0)
            x, y, width, height, class_label = box
            has_anchor = [False, False, False]   # one best anchor per scale
            for anchor_idx in anchor_indices :
                scale_idx = anchor_idx // self.num_anchors_per_scale # 0, 1, 2
                anchor_on_scale = anchor_idx % self.num_anchors_per_scale # 0, 1, 2
                S = self.S[scale_idx]
                i, j = int(S*y), int(S * x)   # grid cell containing the centre
                anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
                if not anchor_taken and not has_anchor[scale_idx] :
                    targets[scale_idx][anchor_on_scale, i, j, 0] = 1
                    x_cell, y_cell = S*x - j, S*y - i # both in [0, 1] within the cell
                    width_cell, height_cell = (
                        width * S,
                        height * S,
                    )
                    box_coordinates = torch.tensor(
                        [x_cell, y_cell, width_cell, height_cell]
                    )
                    targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
                    targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
                    has_anchor[scale_idx] = True
                elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
                    targets[scale_idx][anchor_on_scale, i, j, 0] = -1 # ignore this prediction
        return image, tuple(targets)
|
[
"[email protected]"
] | |
fa653f9c0963489e50b7ebe54873f2359c9252e1
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/P/pere/postliste-ruter.py
|
6868c570393c7a8e844c70e499b5f1ed041bc480
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,876 |
py
|
# -*- coding: UTF-8 -*-
# Python 2 ScraperWiki scraper for the public journal ("postliste") of
# Ruter AS: finds journal PDFs on the listing page and feeds them to the
# shared postliste PDF parser library.
import scraperwiki
import json
from BeautifulSoup import BeautifulSoup
import datetime
import dateutil.parser
import lxml.html
import resource
import sys
import urlparse
import re

# Fetch the listing page once up front; the return value is discarded.
scraperwiki.scrape('http://www2.ruter.no/verdt-a-vite/presse/offentlig-journal/')

lazycache = scraperwiki.swimport('lazycache')
postlistelib = scraperwiki.swimport('postliste-python-lib')

agency = 'Ruter AS'


def report_errors(errors):
    # Dump every accumulated error and abort the run with a failure status.
    if 0 < len(errors):
        print "Errors:"
        for e in errors:
            print e
        exit(1)


def out_of_cpu(arg, spent, hard, soft):
    # Callback for postlistelib when the ScraperWiki CPU budget runs out;
    # `arg` is the shared error list.
    report_errors(arg)


def process_pdf(parser, pdfurl, errors):
    # Download one journal PDF and queue it for parsing.
    # NOTE(review): `errors` is immediately rebound to a fresh local list,
    # so exceptions recorded below never reach the caller's list -- confirm
    # whether that is intentional.
    errors = []
    postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    try:
        pdfcontent = scraperwiki.scrape(pdfurl)
        parser.preprocess(pdfurl, pdfcontent)
        pdfcontent = None  # release the (potentially large) download early
#    except ValueError, e:
#        errors.append(e)
    except IndexError, e:
        errors.append(e)


def process_page_queue(parser, errors):
    # Parse all queued PDF pages; a CPU-limit interruption is recorded as an
    # error instead of crashing the run.
    try:
        parser.process_pages()
        postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    except scraperwiki.CPUTimeExceededError, e:
        errors.append("Processing pages interrupted")


def process_journal_pdfs(parser, listurl, errors):
#    print "Finding PDFs on " + listurl
#    u = urllib.parse.urlparse(listurl)
    # Scrape the listing page and process every linked PDF that has not been
    # scraped before (links live under div.vedlegg).
    html = scraperwiki.scrape(listurl)
    root = lxml.html.fromstring(html)
    html = None
    for ahref in root.cssselect("div.vedlegg a"):
        href = ahref.attrib['href']
        url = urlparse.urljoin(listurl, href)
        if -1 != href.find("file://") or -1 == url.find(".pdf"):
#            print "Skipping non-http URL " + url
            continue
        if parser.is_already_scraped(url):
            True  # deliberate no-op: already-scraped URLs are skipped
#            print "Skipping already scraped " + url
        else:
#            print "Will process " + url
            process_pdf(parser, url, errors)


def test_small_pdfs(parser):
    # Manual smoke test with one small PDF; exits the process when done.
    errors = []
    process_pdf(parser, "http://www2.ruter.no/Documents/Offentlig_journal/2012_Uke_24.pdf?epslanguage=no", errors)
    process_page_queue(parser, errors)
    report_errors(errors)
    exit(0)


# Main flow: parse every journal PDF for the agency, then report errors.
errors = []
parser = postlistelib.PDFJournalParser(agency=agency)
#test_small_pdfs(parser)
process_journal_pdfs(parser, "http://www2.ruter.no/verdt-a-vite/presse/offentlig-journal/", errors)
process_page_queue(parser, errors)
report_errors(errors)
# -*- coding: UTF-8 -*-
# Duplicate copy of the Ruter AS "postliste" scraper (Python 2 /
# ScraperWiki): finds journal PDFs on the listing page and feeds them to
# the shared postliste PDF parser library.
import scraperwiki
import json
from BeautifulSoup import BeautifulSoup
import datetime
import dateutil.parser
import lxml.html
import resource
import sys
import urlparse
import re

# Fetch the listing page once up front; the return value is discarded.
scraperwiki.scrape('http://www2.ruter.no/verdt-a-vite/presse/offentlig-journal/')

lazycache = scraperwiki.swimport('lazycache')
postlistelib = scraperwiki.swimport('postliste-python-lib')

agency = 'Ruter AS'


def report_errors(errors):
    # Dump every accumulated error and abort the run with a failure status.
    if 0 < len(errors):
        print "Errors:"
        for e in errors:
            print e
        exit(1)


def out_of_cpu(arg, spent, hard, soft):
    # Callback for postlistelib when the ScraperWiki CPU budget runs out;
    # `arg` is the shared error list.
    report_errors(arg)


def process_pdf(parser, pdfurl, errors):
    # Download one journal PDF and queue it for parsing.
    # NOTE(review): `errors` is immediately rebound to a fresh local list,
    # so exceptions recorded below never reach the caller's list -- confirm
    # whether that is intentional.
    errors = []
    postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    try:
        pdfcontent = scraperwiki.scrape(pdfurl)
        parser.preprocess(pdfurl, pdfcontent)
        pdfcontent = None  # release the (potentially large) download early
#    except ValueError, e:
#        errors.append(e)
    except IndexError, e:
        errors.append(e)


def process_page_queue(parser, errors):
    # Parse all queued PDF pages; a CPU-limit interruption is recorded as an
    # error instead of crashing the run.
    try:
        parser.process_pages()
        postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    except scraperwiki.CPUTimeExceededError, e:
        errors.append("Processing pages interrupted")


def process_journal_pdfs(parser, listurl, errors):
#    print "Finding PDFs on " + listurl
#    u = urllib.parse.urlparse(listurl)
    # Scrape the listing page and process every linked PDF that has not been
    # scraped before (links live under div.vedlegg).
    html = scraperwiki.scrape(listurl)
    root = lxml.html.fromstring(html)
    html = None
    for ahref in root.cssselect("div.vedlegg a"):
        href = ahref.attrib['href']
        url = urlparse.urljoin(listurl, href)
        if -1 != href.find("file://") or -1 == url.find(".pdf"):
#            print "Skipping non-http URL " + url
            continue
        if parser.is_already_scraped(url):
            True  # deliberate no-op: already-scraped URLs are skipped
#            print "Skipping already scraped " + url
        else:
#            print "Will process " + url
            process_pdf(parser, url, errors)


def test_small_pdfs(parser):
    # Manual smoke test with one small PDF; exits the process when done.
    errors = []
    process_pdf(parser, "http://www2.ruter.no/Documents/Offentlig_journal/2012_Uke_24.pdf?epslanguage=no", errors)
    process_page_queue(parser, errors)
    report_errors(errors)
    exit(0)


# Main flow: parse every journal PDF for the agency, then report errors.
errors = []
parser = postlistelib.PDFJournalParser(agency=agency)
#test_small_pdfs(parser)
process_journal_pdfs(parser, "http://www2.ruter.no/verdt-a-vite/presse/offentlig-journal/", errors)
process_page_queue(parser, errors)
report_errors(errors)
|
[
"[email protected]"
] | |
c151f1cb971c5514c93deb2d3355846a22aa6971
|
6f21068b31084e81f38db304a51a2609d8af37cd
|
/2_Scientific_Libraries/plotsine.py
|
13f08b42e470e8a434e801048a9ba254ea8288aa
|
[] |
no_license
|
vickyf/eurocontrol_datascience
|
374b889cac7b8d377caa78079fb57098e73bba0a
|
0a7c09002e3b5f22ad563b05a6b4afe4cb6791d7
|
refs/heads/master
| 2020-03-19T06:03:14.864839 | 2018-06-04T07:24:25 | 2018-06-04T07:24:25 | 135,986,678 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
# Plot one period of sin(x).
# Fix: "%matplotlib inline" is an IPython/Jupyter magic, not Python syntax,
# so it is a SyntaxError in a plain .py script and has been removed.  Re-add
# the magic only when pasting this into a notebook cell.
import matplotlib.pyplot as plt
import numpy as np

# 100 evenly spaced samples over [0, 2*pi].
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)

plt.plot(x, y)
plt.show()
|
[
"[email protected]"
] | |
07bac4b0659c7151d22ec455cb5bbb340db2a1c5
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/common/util_vietnamese_test.py
|
3fa3dfa1e91ecf9cc5553850f8be6ef7c293dfd5
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,948 |
py
|
#!/usr/bin/env python
# encoding: utf-8
import unittest
from common import util_vietnamese as uv
class TestUtilVietnamese(unittest.TestCase):
    """Tests for common.util_vietnamese: diacritic stripping, tokenising,
    and phrase/n-gram token generation.

    Fix: ``assertEquals`` is a long-deprecated alias of ``assertEqual`` and
    was removed in Python 3.12; all calls now use ``assertEqual``.
    """

    def testConvert2Unsign(self):
        # Diacritics are stripped; case and punctuation are preserved.
        self.assertEqual(uv.convert2Unsign(u'Dĩ độc trị độc'), u'Di doc tri doc')
        self.assertEqual(uv.convert2Unsign(u'Ông ăn ổi Ạ'), u'Ong an oi A')
        self.assertEqual(uv.convert2Unsign(u'Giầy thể thao nữ'), u'Giay the thao nu')
        self.assertEqual(uv.convert2Unsign(u'Thử xem ổn không nhé: Lưu Vĩnh Toàn, Phạm Kim Cương'), u'Thu xem on khong nhe: Luu Vinh Toan, Pham Kim Cuong')

    def testTokenized(self):
        # Separators (+ , ; . -) split tokens; "_" does not.
        s = u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong A-B. A_B'
        expect = [u'Lưu', u'Vĩnh', u'Toàn', u'Pham', u'Kim', u'Cuong', u'A', u'B', 'A_B']
        self.assertEqual(uv.tokenized(s), expect)

    def testMakePhraseToken(self):
        # Phrase tokens are lowercased and "_"-joined with a leading "_".
        self.assertEqual(uv.makePhraseToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), u'_lưu_vĩnh_toàn_pham_kim_cuong')
        self.assertEqual(uv.makePhraseToken(u'Toàn'), u'_toàn')
        self.assertEqual(uv.makePhraseToken(u';'), u'__')
        self.assertEqual(uv.makePhraseToken(u''), u'_')

    def testMakeSuffixNGramToken(self):
        # Every suffix of the phrase, in both accented and unaccented forms.
        expect = set()
        expect.add(u'_lưu_vĩnh_toàn_pham_kim_cuong')
        expect.add(u'_luu_vinh_toan_pham_kim_cuong')
        expect.add(u'_vĩnh_toàn_pham_kim_cuong')
        expect.add(u'_vinh_toan_pham_kim_cuong')
        expect.add(u'_toàn_pham_kim_cuong')
        expect.add(u'_toan_pham_kim_cuong')
        expect.add(u'_pham_kim_cuong')
        expect.add(u'_kim_cuong')
        expect.add(u'_cuong')
        self.assertEqual(uv.makeSuffixNGramToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), expect)

    def testMakeNGramToken(self):
        # All contiguous n-grams up to length 4 (accented forms only).
        expect = set()
        expect.add(u'_lưu_vĩnh_toàn_pham')
        expect.add(u'_vĩnh_toàn_pham_kim')
        expect.add(u'_toàn_pham_kim_cuong')
        expect.add(u'_lưu_vĩnh_toàn')
        expect.add(u'_vĩnh_toàn_pham')
        expect.add(u'_toàn_pham_kim')
        expect.add(u'_pham_kim_cuong')
        expect.add(u'_lưu_vĩnh')
        expect.add(u'_vĩnh_toàn')
        expect.add(u'_toàn_pham')
        expect.add(u'_pham_kim')
        expect.add(u'_kim_cuong')
        expect.add(u'_lưu')
        expect.add(u'_vĩnh')
        expect.add(u'_toàn')
        expect.add(u'_pham')
        expect.add(u'_kim')
        expect.add(u'_cuong')
        self.assertEqual(uv.makeNGramToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), expect)

    def testSimpleTokenized(self):
        # Whitespace and "-" split; "," splits too.
        self.assertEqual(uv.simpleTokenized(u'hello \tw'), ['hello', 'w'])
        self.assertEqual(uv.simpleTokenized(u't-mobile'), ['t', 'mobile'])
        self.assertEqual(uv.simpleTokenized(u'o to, xe may'), ['o', 'to', 'xe', 'may'])
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
[
"[email protected]"
] | |
47114303d4036a4aeb4733f34ef927d7095bb970
|
ac2c3e8c278d0aac250d31fd023c645fa3984a1b
|
/saleor/saleor/core/payments.py
|
777cdcf229f3af0436638628319a4ed5f6c33a12
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
jonndoe/saleor-test-shop
|
152bc8bef615382a45ca5f4f86f3527398bd1ef9
|
1e83176684f418a96260c276f6a0d72adf7dcbe6
|
refs/heads/master
| 2023-01-21T16:54:36.372313 | 2020-12-02T10:19:13 | 2020-12-02T10:19:13 | 316,514,489 | 1 | 1 |
BSD-3-Clause
| 2020-11-27T23:29:20 | 2020-11-27T13:52:33 |
TypeScript
|
UTF-8
|
Python
| false | false | 1,983 |
py
|
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
# flake8: noqa
from ..checkout.models import Checkout, CheckoutLine
from ..discount import DiscountInfo
from ..payment.interface import (
PaymentData,
GatewayResponse,
TokenConfig,
CustomerSource,
PaymentGateway,
)
class PaymentInterface(ABC):
    """Abstract boundary between checkout/order code and payment plugins.

    Each payment operation is addressed to a gateway by its string id and
    carries a ``PaymentData`` payload; implementations return a
    ``GatewayResponse`` describing the outcome.
    """

    @abstractmethod
    def list_payment_gateways(
        self, currency: Optional[str] = None, active_only: bool = True
    ) -> List["PaymentGateway"]:
        """List configured gateways, optionally restricted to a currency."""

    @abstractmethod
    def checkout_available_payment_gateways(
        self, checkout: "Checkout",
    ) -> List["PaymentGateway"]:
        """List gateways that can take payment for the given checkout."""

    @abstractmethod
    def authorize_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Reserve funds for the payment without capturing them."""

    @abstractmethod
    def capture_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Capture previously authorized funds."""

    @abstractmethod
    def refund_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Return captured funds to the customer."""

    @abstractmethod
    def void_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Cancel an authorization before it is captured."""

    @abstractmethod
    def confirm_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Confirm a payment that required an additional customer step."""

    @abstractmethod
    def token_is_required_as_payment_input(self, gateway) -> bool:
        """Whether the gateway needs a client-side token as payment input."""

    @abstractmethod
    def process_payment(
        self, gateway: str, payment_information: "PaymentData"
    ) -> "GatewayResponse":
        """Run the gateway's default end-to-end payment flow."""

    @abstractmethod
    def get_client_token(self, gateway: str, token_config: "TokenConfig") -> str:
        """Obtain a client token for browser/mobile SDK initialisation."""

    @abstractmethod
    def list_payment_sources(
        self, gateway: str, customer_id: str
    ) -> List["CustomerSource"]:
        """List stored payment sources for a customer at the gateway."""
|
[
"[email protected]"
] | |
09e35450b6520f6def9cc7c4b3196fd617f912dc
|
f7b3c098db4dcea347eac5ee18fc19b84cbf2059
|
/scrubadub/scrubbers.py
|
fa06a388bb3e10f0b9bdd5a8bc93ad220ffe8f15
|
[
"MIT"
] |
permissive
|
jb08/scrubadub
|
f625a4bc265dfb743ab91f0a1449629392233cb2
|
7e7b6acc3938ded1e596960b6f095b7e79ae503e
|
refs/heads/master
| 2021-01-16T22:03:02.271663 | 2016-01-14T20:25:32 | 2016-01-14T20:25:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,748 |
py
|
import re
import operator
import textblob
import nltk
from . import exceptions
from . import detectors
from .filth import Filth, MergedFilth
class Scrubber(object):
    """The Scrubber class is used to clean personal information out of dirty
    dirty text. It manages a set of ``Detector``'s that are each responsible
    for identifying their particular kind of ``Filth``.

    NOTE(review): Python 2 code (``iteritems``/``itervalues``, ``unicode``,
    and ``raise StopIteration`` inside a generator, which is a RuntimeError
    under PEP 479 / Python 3.7+).
    """

    def __init__(self, *args, **kwargs):
        super(Scrubber, self).__init__(*args, **kwargs)

        # instantiate all of the detectors: one instance per registered type
        self.detectors = {}
        for type, detector_cls in detectors.types.iteritems():
            self.detectors[type] = detector_cls()

    def clean(self, text, **kwargs):
        """This is the master method that cleans all of the filth out of the
        dirty dirty ``text``. All keyword arguments to this function are passed
        through to the ``Filth.replace_with`` method to fine-tune how the
        ``Filth`` is cleaned.
        """
        if not isinstance(text, unicode):
            raise exceptions.UnicodeRequired

        # Stitch output from: untouched text between filths + each filth's
        # replacement.  The initial empty Filth() supplies the starting
        # offset (presumably end == 0 -- confirm in the filth module).
        clean_chunks = []
        filth = Filth()
        for next_filth in self.iter_filth(text):
            clean_chunks.append(text[filth.end:next_filth.beg])
            clean_chunks.append(next_filth.replace_with(**kwargs))
            filth = next_filth
        clean_chunks.append(text[filth.end:])
        return u''.join(clean_chunks)

    def iter_filth(self, text):
        """Iterate over the different types of filth that can exist.

        Yields Filth objects in document order; overlapping matches from
        different detectors are merged into a single MergedFilth.
        """
        # currently doing this by aggregating all_filths and then sorting
        # inline instead of with a Filth.__cmp__ method, which is apparently
        # much slower http://stackoverflow.com/a/988728/564709
        #
        # NOTE: we could probably do this in a more efficient way by iterating
        # over all detectors simultaneously. just trying to get something
        # working right now and we can worry about efficiency later
        all_filths = []
        for detector in self.detectors.itervalues():
            for filth in detector.iter_filth(text):
                if not isinstance(filth, Filth):
                    raise TypeError('iter_filth must always yield Filth')
                all_filths.append(filth)
        # Sort by start offset so overlap detection below can be a single pass.
        all_filths.sort(key=operator.attrgetter("beg"))

        # this is where the Scrubber does its hard work and merges any
        # overlapping filths.
        if not all_filths:
            raise StopIteration
        filth = all_filths[0]
        for next_filth in all_filths[1:]:
            if filth.end < next_filth.beg:
                yield filth
                filth = next_filth
            else:
                filth = filth.merge(next_filth)
        yield filth
|
[
"[email protected]"
] | |
9e8d55b19f819bc5d3bd1235d4e62225b2271730
|
b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e
|
/pyedit/pyedit-032/pyedlib/pedync.py
|
0ba8b937ecb2f51e498cc3516a5f9b0a422ebcc7
|
[] |
no_license
|
pglen/pgpygtk
|
4d1405478a714f003984cf3e3db04ff1f767470b
|
33f58010e304f1a312f2356de453ecedb7aa21ef
|
refs/heads/master
| 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,693 |
py
|
#!/usr/bin/env python

# Prompt Handler for pyedit
# Python 2 / PyGTK dialog helpers for the editor's prompts.

import os, string, gtk, gobject

import pyedlib.pedconfig

# ------------------------------------------------------------------------

def yes_no_cancel(title, message, cancel = True):
    """Show a modal Yes/No(/Cancel) dialog; return the gtk response code.

    Reject/Close/Delete responses are all normalised to RESPONSE_CANCEL.
    """
    dialog = gtk.Dialog(title,
                        None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
    dialog.set_default_response(gtk.RESPONSE_YES)
    dialog.set_position(gtk.WIN_POS_CENTER)

    # Pad the message with spacer labels on both sides.
    sp = "  "
    label = gtk.Label(message);
    label2 = gtk.Label(sp); label3 = gtk.Label(sp)
    hbox = gtk.HBox() ; hbox.pack_start(label2);
    hbox.pack_start(label); hbox.pack_start(label3)
    dialog.vbox.pack_start(hbox)

    dialog.add_button("_Yes", gtk.RESPONSE_YES)
    dialog.add_button("_No", gtk.RESPONSE_NO)
    if cancel:
        dialog.add_button("_Cancel", gtk.RESPONSE_CANCEL)

    # Let y/n (and c, when present) keystrokes answer the dialog directly.
    dialog.connect("key-press-event", area_key, cancel)
    #dialog.connect("key-release-event", area_key, cancel)
    dialog.show_all()
    response = dialog.run()

    # Convert all responses to cancel
    if response == gtk.RESPONSE_CANCEL or \
            response == gtk.RESPONSE_REJECT or \
            response == gtk.RESPONSE_CLOSE or \
            response == gtk.RESPONSE_DELETE_EVENT:
        response = gtk.RESPONSE_CANCEL

    dialog.destroy()
    return response


def area_key(win, event, cancel):
    # Keyboard shortcuts for the yes/no/cancel dialog.
    #print event
    if event.keyval == gtk.keysyms.y or \
            event.keyval == gtk.keysyms.Y:
        win.response(gtk.RESPONSE_YES)
    if event.keyval == gtk.keysyms.n or \
            event.keyval == gtk.keysyms.N:
        win.response(gtk.RESPONSE_NO)
    if cancel:
        # Only honour 'c' when a Cancel button actually exists.
        if event.keyval == gtk.keysyms.c or \
                event.keyval == gtk.keysyms.C:
            win.response(gtk.RESPONSE_CANCEL)
# ------------------------------------------------------------------------
# Show About dialog:

import platform

def about():
    """Show the PyEdit About dialog (non-modal; destroys itself on response)."""
    dialog = gtk.AboutDialog()
    dialog.set_name(" PyEdit - Python Editor ")
    dialog.set_version(str(pyedlib.pedconfig.conf.version));
    # Report the PyGTK / GTK / Python versions in the comments area.
    comm = "\nPython based easily configurable editor.\n"\
            "\nRunning PyGtk %d.%d.%d" % gtk.pygtk_version +\
            "\nRunning GTK %d.%d.%d\n" % gtk.gtk_version +\
            "\nRunning Python %s\n" % platform.python_version()
    dialog.set_comments(comm);
    dialog.set_copyright("Portions \302\251 Copyright Peter Glen\n"
                         "Project placed in the Public Domain.")

    # Optional logo image; failure to load is non-fatal.
    img_dir = os.path.join(os.path.dirname(__file__), 'images')
    img_path = os.path.join(img_dir, 'gtk-logo-rgb.gif')
    try:
        pixbuf = gtk.gdk.pixbuf_new_from_file(img_path)
        #print "loaded pixbuf"
        dialog.set_logo(pixbuf)
    except gobject.GError, error:
        print "Cannot load logo for about dialog";

    #dialog.set_website("")
    ## Close dialog on user response
    dialog.connect ("response", lambda d, r: d.destroy())
    # Alt+X also closes the dialog (see about_key).
    dialog.connect("key-press-event", about_key)
    dialog.show()


def about_key(win, event):
    # Close the About dialog on Alt+X.
    #print "about_key", event
    if event.type == gtk.gdk.KEY_PRESS:
        if event.keyval == gtk.keysyms.x or event.keyval == gtk.keysyms.X:
            if event.state & gtk.gdk.MOD1_MASK:
                win.destroy()
# Show a regular message:

def message(strx, title = None, icon = gtk.MESSAGE_INFO):
    """Show a self-closing message dialog with the given text, title and icon."""
    dialog = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
                               icon, gtk.BUTTONS_CLOSE, strx)
    if title:
        dialog.set_title(title)
    else:
        dialog.set_title("pyedit")  # default window title

    # Close dialog on user response
    dialog.connect("response", lambda d, r: d.destroy())
    dialog.show()
|
[
"[email protected]"
] | |
a58a9d7303bef7ea14954d5a6376cf8f18b14d02
|
fe91ffa11707887e4cdddde8f386a8c8e724aa58
|
/chrome/test/enterprise/e2e/policy/safe_browsing/safe_browsing_ui_test.py
|
296faf0623b41a371544722ac0962d719d89d5de
|
[
"BSD-3-Clause"
] |
permissive
|
akshaymarch7/chromium
|
78baac2b45526031846ccbaeca96c639d1d60ace
|
d273c844a313b1e527dec0d59ce70c95fd2bd458
|
refs/heads/master
| 2023-02-26T23:48:03.686055 | 2020-04-15T01:20:07 | 2020-04-15T01:20:07 | 255,778,651 | 2 | 1 |
BSD-3-Clause
| 2020-04-15T02:04:56 | 2020-04-15T02:04:55 | null |
UTF-8
|
Python
| false | false | 2,371 |
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import test_util
import time
from absl import app
from selenium import webdriver
from pywinauto.application import Application
# Known-bad test URLs hosted by Google's SafeBrowsing test site.
UnsafePageLink = "http://testsafebrowsing.appspot.com/s/malware.html"
# Tab title Chrome shows when SafeBrowsing blocks the page.
UnsafePageLinkTabText = "Security error"

UnsafeDownloadLink = "http://testsafebrowsing.appspot.com/s/badrep.exe"
# Regex matched against UI text of the dangerous-download warning.
UnsafeDownloadTextRe = ".* is dangerous,\s*so\s*Chrom.* has blocked it"


def visit(window, url):
    """Visit a specific URL through pywinauto.Application.

    SafeBrowsing intercepts HTTP requests & hangs WebDriver.get(), which prevents
    us from getting the page source. Using pywinauto to visit the pages instead.
    """
    window.Edit.set_edit_text(url).type_keys("%{ENTER}")
    time.sleep(10)  # crude fixed wait for navigation to settle
def main(argv):
    """Visit a known-bad page and download, then print SafeBrowsing verdicts.

    Python 2 script; emits "RESULTS.*" lines for the calling harness.
    """
    # Keep background networking enabled so SafeBrowsing lists can download.
    exclude_switches = ["disable-background-networking"]
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option("excludeSwitches", exclude_switches)
    driver = test_util.create_chrome_webdriver(chrome_options=chrome_options)

    # Attach pywinauto (UIA backend) to the running Chrome/Chromium window.
    app = Application(backend="uia")
    app.connect(title_re='.*Chrome|.*Chromium')
    window = app.top_window()

    # Wait for Chrome to download SafeBrowsing lists in the background.
    # There's no trigger to force this operation or synchronize on it, but quick
    # experiments have shown 3-4 minutes in most cases, so 5 should be plenty.
    time.sleep(60 * 5)

    print "Visiting unsafe page: %s" % UnsafePageLink
    visit(window, UnsafePageLink)
    # Scan all visible UI elements for the blocked-page tab title.
    unsafe_page = False
    for desc in app.top_window().descendants():
        if desc.window_text():
            print "unsafe_page.item: %s" % desc.window_text()
            if UnsafePageLinkTabText in desc.window_text():
                unsafe_page = True
                break

    print "Downloading unsafe file: %s" % UnsafeDownloadLink
    visit(window, UnsafeDownloadLink)
    # Scan for the dangerous-download warning text.
    unsafe_download = False
    for desc in app.top_window().descendants():
        if desc.window_text():
            print "unsafe_download.item: %s" % desc.window_text()
            if re.search(UnsafeDownloadTextRe, desc.window_text()):
                unsafe_download = True
                break

    # Machine-readable verdicts for the harness.
    print "RESULTS.unsafe_page: %s" % unsafe_page
    print "RESULTS.unsafe_download: %s" % unsafe_download
    driver.quit()


if __name__ == '__main__':
    # absl.app parses flags and dispatches to main(argv).
    app.run(main)
|
[
"[email protected]"
] | |
b1a541ae2823325189c5b0f803ec117c9df66d07
|
de69d99db8be567d97060149481091c25907d4ef
|
/src/trees/binary_trees.py
|
84f2bcde422555256b4619c0ba4e877f5b7f152d
|
[] |
no_license
|
chalam/Pynaconda
|
0dd5acdb19c38352ee5d4b92c002d05bd75e452d
|
e24600d26afbc685e3853a6037f50dfc3fe077d2
|
refs/heads/master
| 2021-01-10T13:37:54.811250 | 2018-10-13T20:48:44 | 2018-10-13T20:48:44 | 36,340,529 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,556 |
py
|
class Node:
    """A single binary-tree node: a value plus left/right child links."""

    def __init__(self, value):
        # Children start out empty; they are linked in by Tree.insert.
        self.data = value
        self.left = None
        self.right = None
class Tree:
    """Binary search tree utilities: insert, search, delete, traversals.

    Nodes are plain ``Node`` instances; methods take the subtree root
    explicitly, so an empty tree is represented by ``None``.
    """

    def createNode(self, data):
        """Utility function to create a detached node holding *data*."""
        return Node(data)

    def insert(self, node, data):
        """Insert *data* under *node*; return the (possibly new) subtree root.

        Duplicate keys are not allowed (silently ignored).
        """
        # if tree is empty, return a fresh root node
        if node is None:
            return self.createNode(data)

        # smaller keys go left, larger keys go right
        if data < node.data:
            node.left = self.insert(node.left, data)
        elif data > node.data:
            node.right = self.insert(node.right, data)

        return node

    def search(self, node, data):
        """Return the node holding *data*, or None if it is absent."""
        if node is None or node.data == data:
            return node

        if node.data < data:
            return self.search(node.right, data)
        else:
            return self.search(node.left, data)

    def deleteNode(self, node, data):
        """Delete *data* from the subtree rooted at *node*; return new root.

        Fixes over the original:
        - the leaf case used ``del node`` and then kept reading ``node``,
          which raised UnboundLocalError; leaves are now removed by
          returning the (None) child;
        - a node with two children was silently left in place; it is now
          replaced by its in-order successor, which is then deleted from
          the right subtree.
        """
        if node is None:
            return None

        # descend to the node that holds the key
        if data < node.data:
            node.left = self.deleteNode(node.left, data)
        elif data > node.data:
            node.right = self.deleteNode(node.right, data)
        else:
            # Zero or one child: splice the (possibly None) child upward.
            if node.left is None:
                return node.right
            if node.right is None:
                return node.left
            # Two children: copy the in-order successor's value here, then
            # remove the successor from the right subtree.
            successor = node.right
            while successor.left is not None:
                successor = successor.left
            node.data = successor.data
            node.right = self.deleteNode(node.right, successor.data)

        return node

    def traverseInorder(self, root):
        """Print every node value in (left, root, right) i.e. sorted order."""
        if root is not None:
            self.traverseInorder(root.left)
            print(root.data)
            self.traverseInorder(root.right)

    def traversePreorder(self, root):
        """Print every node value in (root, left, right) order."""
        if root is not None:
            print(root.data)
            self.traversePreorder(root.left)
            self.traversePreorder(root.right)

    def traversePostorder(self, root):
        """Print every node value in (left, right, root) order.

        Fix: the recursive calls previously dispatched to traversePreorder,
        so the printed sequence was not a post-order walk.
        """
        if root is not None:
            self.traversePostorder(root.left)
            self.traversePostorder(root.right)
            print(root.data)
def main():
    # Build a small BST and exercise each traversal order.
    root = None
    tree = Tree()
    root = tree.insert(root, 10)
    print(root)  # prints the Node object's default repr
    tree.insert(root, 20)
    tree.insert(root, 30)
    tree.insert(root, 40)
    tree.insert(root, 70)
    tree.insert(root, 60)
    tree.insert(root, 80)

    print("Traverse Inorder")
    tree.traverseInorder(root)

    print("Traverse Preorder")
    tree.traversePreorder(root)

    print("Traverse Postorder")
    tree.traversePostorder(root)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
c9d62cd28eb6a98c113b079864bf0553c983be35
|
284f4f56aed56573eb5516aa67c99bf41e595522
|
/Leetcode/Arrays/p3574.py
|
4261a42c73d1469fdff5a35d33f807e57238da87
|
[] |
no_license
|
rohangoli/PythonAdvanced
|
537a05eff9ec305a6ec32fa2d0962a64976cd097
|
6448a5f0d82c7e951b5e476638e15a3c34966cd9
|
refs/heads/develop
| 2023-07-20T04:33:50.764104 | 2023-07-14T04:04:18 | 2023-07-14T04:04:18 | 126,811,520 | 0 | 0 | null | 2022-06-10T23:07:10 | 2018-03-26T10:20:16 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 514 |
py
|
## Squares of a Sorted Array
# Example 1:
# Input: nums = [-4,-1,0,3,10]
# Output: [0,1,9,16,100]
# Explanation: After squaring, the array becomes [16,1,0,9,100].
# After sorting, it becomes [0,1,9,16,100].
# Example 2:
# Input: nums = [-7,-3,2,3,11]
# Output: [4,9,9,49,121]
class Solution:
    """LeetCode 977 -- Squares of a Sorted Array."""

    def sortedSquares(self, nums: List[int]) -> List[int]:
        """Square every element and return the values in ascending order.

        Like the original, this mutates *nums* in place and returns the
        same list object.
        """
        nums[:] = sorted(value * value for value in nums)
        return nums
|
[
"[email protected]"
] | |
1525fa01ca88e86a1491f6968ca7daf25bda962c
|
c086a38a366b0724d7339ae94d6bfb489413d2f4
|
/PythonEnv/Lib/site-packages/win32com/server/exception.py
|
f84cccdf5e349025e91ae2f9bdf4e87a0bb9e8d9
|
[] |
no_license
|
FlowkoHinti/Dionysos
|
2dc06651a4fc9b4c8c90d264b2f820f34d736650
|
d9f8fbf3bb0713527dc33383a7f3e135b2041638
|
refs/heads/master
| 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,452 |
py
|
"""Exception Handling
Exceptions
To better support COM exceptions, the framework allows for an instance to be
raised. This instance may have a certain number of known attributes, which are
translated into COM exception details.
This means, for example, that Python could raise a COM exception that includes details
on a Help file and location, and a description for the user.
This module provides a class which provides the necessary attributes.
"""
import sys, pythoncom
# Note that we derive from com_error, which derives from exceptions.Exception
# Also note that we dont support "self.args", as we dont support tuple-unpacking
class COMException(pythoncom.com_error):
    """An Exception object that is understood by the framework.

    If the framework is presented with an exception of type class,
    it looks for certain known attributes on this class to provide rich
    error information to the caller.

    It should be noted that the framework supports providing this error
    information via COM Exceptions, or via the ISupportErrorInfo interface.

    By using this class, you automatically provide rich error information to the
    server.
    """

    def __init__(self, description=None, scode=None,
                 source=None, helpfile=None, helpContext=None,
                 desc=None, hresult=None):
        """Initialize an exception
        **Params**

        description -- A string description for the exception.
        scode -- An integer scode to be returned to the server, if necessary.
        The pythoncom framework defaults this to be DISP_E_EXCEPTION if not specified otherwise.
        source -- A string which identifies the source of the error.
        helpfile -- A string which points to a help file which contains details on the error.
        helpContext -- An integer context in the help file.
        desc -- A short-cut for description.
        hresult -- A short-cut for scode.
        """
        # `desc`/`hresult` are aliases: the explicit names win when both given.
        # convert a WIN32 error into an HRESULT
        scode = scode or hresult
        if scode and scode != 1:  # We dont want S_FALSE mapped!
            if scode >= -32768 and scode < 32768:
                # this is HRESULT_FROM_WIN32()
                # -2147024896 == 0x80070000 (FACILITY_WIN32, severity error)
                scode = -2147024896 | (scode & 0x0000FFFF)
        self.scode = scode
        self.description = description or desc
        # Fall back to a symbolic/system string when no description given.
        if scode == 1 and not self.description:
            self.description = "S_FALSE"
        elif scode and not self.description:
            self.description = pythoncom.GetScodeString(scode)
        self.source = source
        self.helpfile = helpfile
        self.helpcontext = helpContext

        # todo - fill in the exception value
        pythoncom.com_error.__init__(self, scode, self.description, None, -1)

    def __repr__(self):
        return "<COM Exception - scode=%s, desc=%s>" % (self.scode, self.description)
# Old name for the COMException class.
# Do NOT use the name Exception, as it is now a built-in
# COMException is the new, official name.
Exception = COMException  # legacy alias; shadows the builtin within this module
def IsCOMException(t=None):
    """Return whether *t* (default: the active exception type) is a COM error.

    True for pythoncom.com_error and any subclass, including COMException.
    """
    if t is None:
        t = sys.exc_info()[0]
    try:
        return issubclass(t, pythoncom.com_error)
    except TypeError:  # 1.5 in -X mode? (t is a string exception)
        # Fix: this read `pythoncon.com_error` (typo), raising NameError
        # whenever the string-exception fallback path was reached.
        return t is pythoncom.com_error
def IsCOMServerException(t=None):
    """Return truthy iff *t* (default: active exception type) is a COMException."""
    exc_type = t if t is not None else sys.exc_info()[0]
    try:
        return issubclass(exc_type, COMException)
    except TypeError:  # String exception (ancient Python) -- never ours.
        return 0
|
[
"="
] |
=
|
f1de4f284f6ae6dcbf0e216dae4bd4020b7fe948
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Mastering-Machine-Learning-scikit-learn/NumPy-Cookbook/NumPy Cookbook 2nd Edition_CodeBundle/Final Code/0945OS_05_Final Code/ch5code/sobel.py
|
7a60c93500bba9e0a6d9825f564f9b66bfa7ba43
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 623 |
py
|
# Apply Sobel edge filters (X, Y, and default) to the Lena sample image and
# show the four panels in a 2x2 grid.
import scipy
import scipy.misc     # fix: `import scipy` does not import the misc submodule
import scipy.ndimage
import matplotlib.pyplot as plt

# NOTE(review): scipy.misc.lena() was removed from modern SciPy; on current
# installs substitute another sample image (e.g. scipy.datasets.ascent()).
lena = scipy.misc.lena()
plt.subplot(221)
plt.imshow(lena)
plt.title('Original')
plt.axis('off')

# Sobel X filter (derivative along axis 0)
sobelx = scipy.ndimage.sobel(lena, axis=0, mode='constant')
plt.subplot(222)
plt.imshow(sobelx)
plt.title('Sobel X')
plt.axis('off')

# Sobel Y filter (derivative along axis 1)
sobely = scipy.ndimage.sobel(lena, axis=1, mode='constant')
plt.subplot(223)
plt.imshow(sobely)
plt.title('Sobel Y')
plt.axis('off')

# Default Sobel filter (scipy defaults: last axis, 'reflect' boundary mode)
default = scipy.ndimage.sobel(lena)
plt.subplot(224)
plt.imshow(default)
plt.title('Default Filter')
plt.axis('off')

plt.show()
|
[
"[email protected]"
] | |
3f2079b1e4c24c815959e7a54257986eb1c35628
|
82199bfad7b77d62aa265c8ea463e20df6901801
|
/global_variables.py
|
0349063285f925772377b500255d2fdee5a359ce
|
[] |
no_license
|
hyzcn/interactive-behaviour-design
|
6119f8685b91226916f06678735fcfea5e6c27ab
|
26faa63f0d1494dedd7dd9c3757ab08ec6473119
|
refs/heads/master
| 2020-05-16T09:04:42.342957 | 2019-04-22T19:26:27 | 2019-04-22T19:38:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
# ALE is generally safe to use from multiple threads, but we do need to be careful about
# two threads creating environments at the same time:
# https://github.com/mgbellemare/Arcade-Learning-Environment/issues/86
# Any thread which creates environments (which includes restoring from a reset state)
# should acquire this lock before attempting the creation.
# All three globals are None placeholders; they are expected to be assigned
# elsewhere at startup before use (NOTE(review): confirm the init site).
env_creation_lock = None  # presumably a threading.Lock once initialised

segment_save_mode = None  # policy for saving trajectory segments (set elsewhere)
max_segs = None           # cap on stored segments (set elsewhere)
|
[
"[email protected]"
] | |
9b43ee53672fb7b8aa059524c4d04d2b92fd2289
|
689a557b32161faafeb0b68076bca96b65c320ce
|
/restourant/migrations/0003_auto_20170726_1525.py
|
3156e7bad15655147d6acc6853903542146c11b9
|
[] |
no_license
|
FNSalimov/new
|
5d957a5e2543bcecece2fa88e4ff61030eb58203
|
e2b15e5e83dbc22d776112fc5859219d7f625e4f
|
refs/heads/master
| 2021-01-01T18:36:54.171096 | 2017-07-27T06:27:24 | 2017-07-27T06:27:24 | 98,386,102 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 740 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-26 12:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the explicit OrderDish through-model with a plain
    ManyToManyField from Order to Dish.

    NOTE(review): deleting OrderDish discards any data stored on the old
    through table — confirm that is acceptable before running in production.
    """
    dependencies = [
        ('restourant', '0002_order_orderdish'),
    ]
    operations = [
        # Drop both foreign keys first so OrderDish holds no references
        # when the model itself is deleted below.
        migrations.RemoveField(
            model_name='orderdish',
            name='dish',
        ),
        migrations.RemoveField(
            model_name='orderdish',
            name='order',
        ),
        # The direct M2M replaces the through-model relationship.
        migrations.AddField(
            model_name='order',
            name='dishes',
            field=models.ManyToManyField(to='restourant.Dish'),
        ),
        migrations.DeleteModel(
            name='OrderDish',
        ),
    ]
|
[
"[email protected]"
] | |
87db130e21a172d48ce24cd1480dd27f518ba1f0
|
8313b823a755694cfd71e57ad63760ba1c7009d4
|
/Classification/kernal_SVM.py
|
adcd73f8c99e84b0ddc56f69991b888dba8e9c20
|
[] |
no_license
|
KRBhavaniSankar/Machine-Learning
|
49063374a8b243563212cf52a933da03b41bb576
|
339f146362aa5960794d8ddcef50d502955c24c4
|
refs/heads/master
| 2021-06-07T17:09:57.259971 | 2020-02-18T13:40:03 | 2020-02-18T13:40:03 | 143,809,260 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,638 |
py
|
# Kernal-SVM Classification
# Fits an RBF-kernel SVC on (Age, Estimated Salary) from the Social Network
# Ads dataset and visualises the decision boundary on both splits.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap

# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values  # columns 2, 3: Age, Estimated Salary
y = dataset.iloc[:, 4].values       # column 4: target label

# Splitting the dataset into the Training set and Test set.
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# model_selection is the supported home of train_test_split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0)

# Feature Scaling (SVMs are sensitive to feature magnitudes)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
# Reuse the training statistics for the test set; never refit on test data.
X_test = sc.transform(X_test)

# Fitting classifier to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel="rbf", random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)


def plot_decision_boundary(X_set, y_set, title):
    """Plot the classifier's decision regions plus the given labelled points.

    The grid step of 0.01 is in *scaled* feature units (post-StandardScaler).
    """
    X1, X2 = np.meshgrid(
        np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
        np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
    # Predict every grid point, then reshape back into the grid for contourf.
    plt.contourf(X1, X2,
                 classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha=0.75, cmap=ListedColormap(('red', 'green')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c=ListedColormap(('red', 'green'))(i), label=j)
    plt.title(title)
    plt.xlabel('Age')
    plt.ylabel('Estimated Salary')
    plt.legend()
    plt.show()


# Visualising the Training set results
plot_decision_boundary(X_train, y_train, 'Kernal-SVM Classifier (Training set)')

# Visualising the Test set results
plot_decision_boundary(X_test, y_test, 'Kernal-SVM Classifier (Test set)')
|
[
"[email protected]"
] | |
0dad5e1d305a873fa56187c074313e2abafcd989
|
a57a79bd2cb2397c6d879751e7041e9142390acc
|
/apps/tags/management/commands/migrate_tags.py
|
ba82af97368ac66dbcffd52844782f5c57617454
|
[] |
no_license
|
cephey/country
|
b41e85bfd5df20caec5d6f54b409ffe4f1b11ac3
|
774800e79417122876119246bb5b6e9b2e186891
|
refs/heads/master
| 2021-01-22T23:15:46.934125 | 2017-09-10T21:53:16 | 2017-09-10T21:53:16 | 85,618,298 | 0 | 0 | null | 2017-05-11T11:34:16 | 2017-03-20T19:36:45 |
Python
|
UTF-8
|
Python
| false | false | 911 |
py
|
import csv
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from apps.tags.models import Tag
class Command(BaseCommand):
    # Management command: bulk-import Tag rows from a CSV export.
    help = 'Migrate tags from csv'
    def add_arguments(self, parser):
        # --path is effectively required: handle() raises CommandError without it.
        parser.add_argument('--path', help='/path/to/file.csv')
    def handle(self, *args, **kwargs):
        """Read the CSV at --path and bulk-create Tag objects.

        Assumes column 0 holds the external id and column 7 the tag name —
        TODO confirm against the actual export format (not visible here).
        """
        self.stdout.write('Start...')
        path = kwargs.get('path')
        if not path:
            raise CommandError('Path is required')
        with open(path, 'r', encoding=settings.MIGRATE_FILE_ENCODING) as csvfile:
            reader = csv.reader(csvfile)
            tags = []
            for row in reader:
                tags.append(
                    Tag(
                        name=row[7], ext_id=row[0]
                    )
                )
        # batch_size=100 caps each INSERT at 100 rows to keep queries bounded.
        Tag.objects.bulk_create(tags, batch_size=100)
        self.stdout.write('End...')
|
[
"[email protected]"
] | |
65f5d2a5f15722582ddbc314d4a85e0b2b534645
|
99ea33e3b36d3da52d3817c28fd60696e4d36c91
|
/config/settings.py
|
e7d355059e8274925475bf1ab8ef560a7afa450e
|
[] |
no_license
|
ghostnoop/WhatToWatch-ml-telegram
|
2628c97a62f24ac149f540386d0d14a2091d97d9
|
da7bb1386dab789641d9245544e89cf5d983fb50
|
refs/heads/main
| 2023-03-09T07:15:15.707105 | 2021-02-22T19:00:13 | 2021-02-22T19:00:13 | 341,305,969 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 190 |
py
|
import tmdbsimple as tmdb
class BotSettings:
    # Static configuration for the Telegram bot.
    # SECURITY NOTE(review): live credentials are hard-coded in source control;
    # they should be rotated and loaded from the environment instead.
    API_TOKEN = "1083477387:AAHtlo1ngC61ZFA8rVadPut15CUjX92h79U"  # Telegram bot token
    ADMIN_ID = 153621836  # Telegram user id of the bot administrator
# Module import side effect: configures tmdbsimple's global API key.
tmdb.API_KEY = 'ad11be6ccbdb27f9a1f4530c5848891f'
|
[
"[email protected]"
] | |
ed4170fd87c23a603adf961d9030d73d0b004cf1
|
2b1448085c5ad44e78772dde1dcc2fae9cc4c3cc
|
/botorch/sampling/__init__.py
|
d27b244ea432efe02fe8a14dadd028d62b99e381
|
[
"MIT"
] |
permissive
|
leelasd/botorch
|
47fa0ff9c5f6c534ecfcba59f5b1bf52eea0d62e
|
c48bfc822940ee8a6e5e2604d4ff282033dbe892
|
refs/heads/master
| 2022-12-17T04:42:41.591444 | 2020-09-10T23:45:05 | 2020-09-10T23:46:41 | 294,561,185 | 1 | 0 |
MIT
| 2020-09-11T01:19:36 | 2020-09-11T01:19:35 | null |
UTF-8
|
Python
| false | false | 806 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Package init for botorch.sampling: re-exports the sampler classes and QMC
# engines so callers can import everything from this single namespace.
from botorch.sampling.pairwise_samplers import (
    PairwiseIIDNormalSampler,
    PairwiseMCSampler,
    PairwiseSobolQMCNormalSampler,
)
from botorch.sampling.qmc import MultivariateNormalQMCEngine, NormalQMCEngine
from botorch.sampling.samplers import IIDNormalSampler, MCSampler, SobolQMCNormalSampler
from torch.quasirandom import SobolEngine
# Explicit public API: controls `from botorch.sampling import *` and signals
# which names are supported for external use.
__all__ = [
    "IIDNormalSampler",
    "MCSampler",
    "MultivariateNormalQMCEngine",
    "NormalQMCEngine",
    "SobolEngine",
    "SobolQMCNormalSampler",
    "PairwiseIIDNormalSampler",
    "PairwiseMCSampler",
    "PairwiseSobolQMCNormalSampler",
]
|
[
"[email protected]"
] | |
884dd2e27584897fc76bd41c4be519872d0ebcf0
|
07a42b46fe9f154c32c1cfe4e7ef878d5c653ae7
|
/simple_skeleton/urls.py
|
9d3acfd8f816549f4662656cda55eb48a7def3ea
|
[
"MIT"
] |
permissive
|
Mamacitapunto/simple-django-skeleton
|
0d2fe60616a2df7829f1fdf05b57754f464d6e9f
|
0babb4aa6bfcf6b9a803caed3a4167cbf4d9113f
|
refs/heads/master
| 2021-01-19T04:25:03.444748 | 2015-11-30T20:16:02 | 2015-11-30T20:16:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
from django.conf.urls import include, url
from django.contrib import admin
from simple_skeleton.apps.core import views as core_views
# URL routing: site root -> core app's home view; /admin/ -> Django admin.
urlpatterns = [
    url(r'^$', core_views.home, name='home'),
    url(r'^admin/', include(admin.site.urls)),
]
|
[
"[email protected]"
] | |
61c120b8fd81352b68514fa25a7440b9e07c6d13
|
7807d8d9d109a3e272fffed91bf841201da39256
|
/trans_NTL_1_C/aaa119_NTL_1_C_kotonoha.py
|
b982f55f4bfddd7e6698b1ac8f94bef517d3ba62
|
[] |
no_license
|
y-akinobu/AOJ_to_Kotonoha
|
0e8df43393964fcdd5df06c75545091bd6c0c2e2
|
5a694a55a3d85e3fbc4a07b57edc4374556db9a1
|
refs/heads/main
| 2023-02-05T15:33:16.581177 | 2020-12-30T16:14:44 | 2020-12-30T16:14:44 | 325,524,216 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
# AOJ NTL_1_C: least common multiple of n integers.
#
# FIXES: the original generated snippet began with a syntactically broken
# bracketed annotation (a stray `[` made the file unparseable), and it used
# fractions.gcd, which was deprecated and removed in Python 3.9.
# math.gcd is the supported replacement.
import math
from functools import reduce


def lcm_of(values):
    """Return the least common multiple of a non-empty sequence of ints.

    Uses the identity lcm(a, b) = a * b // gcd(a, b), folded pairwise
    across the sequence.
    """
    return reduce(lambda acc, v: acc * v // math.gcd(acc, v), values)


if __name__ == "__main__":
    # Input format: first line is the count n (implied by the second line's
    # length, so the value itself is unused), second line holds n integers.
    input()
    values = list(map(int, input().split()))
    print(lcm_of(values))
|
[
"[email protected]"
] | |
8f8f3812524da3845410fcca49e1304a214732b9
|
33421188df7d7dcf2ee9be0771b0f2fe1ffad4f5
|
/2014/Codemotion/celery/examples/canvas/tasks.py
|
852cca47ec68a4b448671ea1b3a13cf41af94abc
|
[
"CC-BY-4.0"
] |
permissive
|
Gustavo17/ponencias
|
c0482fc7a72d7d4d829a54b94775e77c81ca5d97
|
effb002b0300fe57d26776654b61a2396010da40
|
refs/heads/master
| 2021-01-13T09:18:13.837313 | 2014-11-21T04:58:11 | 2014-11-21T04:58:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
from celery import Celery, group, chain, chord
import time
# Celery app using RabbitMQ (amqp) as both broker and result backend.
app = Celery('tasks', backend='amqp', broker='amqp://guest@localhost//')
@app.task
def fetch_url(url):
    # Stub fetcher: returns placeholder text instead of downloading `url`.
    return "CONTENT DATA"
@app.task
def lowcase(content):
    # Lower-case the fetched content.
    return content.lower()
@app.task
def split(content):
    # Whitespace-tokenise the content into a list of words.
    return content.split()
@app.task
def flat(data):
    # Flatten one level: list of word-lists -> single word list.
    return [item for sublist in data for item in sublist]
@app.task
def sleeper(data):
    # Simulate slow work, then pass the data through unchanged.
    time.sleep(1)
    return data
@app.task
def join(data):
    # Join the words with '#' into one string.
    return "#".join(data)
if __name__ == "__main__":
    # Canvas demo: a chord whose header runs fetch->lowcase->split per URL in
    # parallel, and whose body flattens, sleeps, then joins the combined words.
    res = chord([chain(fetch_url.s(url), lowcase.s(), split.s()) for url in ["www.google.com", "www.facebook.com"]], flat.s() | sleeper.s() | join.s())()
    print(res.get())
|
[
"[email protected]"
] | |
be8a44a141a5d792643c73427964c8088de152e4
|
448756d7ff6c9cbdf0a8b3b4ff8309207a6bb504
|
/scripts/howtofit/chapter_database/profiles.py
|
7621d81f9dc7090851b2ab1977513de4f4476fbc
|
[] |
no_license
|
jonathanfrawley/autofit_workspace_copy
|
f84c8ed8d8106cbd0735601b54d35104976219cf
|
4631ac452f62cd9c3d5257b4d0b2a64630c51ecf
|
refs/heads/master
| 2023-04-21T04:06:20.140963 | 2021-05-13T16:02:59 | 2021-05-13T16:02:59 | 367,427,102 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,552 |
py
|
import numpy as np

"""
Model components for tutorial 5: two 1D light profiles (`Gaussian` and
`Exponential`) that PyAutoFit can combine into a single model. In analysis.py
the model-data is generated as the sum of every profile's contribution.

Both concrete profiles follow the PyAutoFit model-component format and share
the abstract `Profile` base class, which owns the parameters common to every
profile: a centre and an overall intensity normalisation.
"""


class Profile:
    def __init__(self, centre=0.0, intensity=0.01):
        """Abstract 1D profile.

        Every concrete profile delegates its shared parameters here, so the
        two assignments live in one place instead of being repeated per class.

        Parameters
        ----------
        centre : float
            The x coordinate of the profile centre.
        intensity : float
            Overall intensity normalisation of the profile.
        """
        self.centre = centre
        self.intensity = intensity


class Gaussian(Profile):
    def __init__(
        self,
        centre=0.0,  # <- PyAutoFit reads these constructor arguments
        intensity=0.1,  # <- as the Gaussian's free model parameters.
        sigma=0.01,
    ):
        """1D Gaussian profile, usable as a PyAutoFit model component whose
        parameters are fitted by a `NonLinearSearch`.

        Parameters
        ----------
        centre : float
            The x coordinate of the profile centre.
        intensity : float
            Overall intensity normalisation of the Gaussian profile.
        sigma : float
            The sigma value controlling the width of the Gaussian.
        """
        # Hand the shared (centre, intensity) parameters to the Profile base.
        super().__init__(centre=centre, intensity=intensity)
        self.sigma = sigma  # width parameter, specific to the Gaussian

    def profile_from_xvalues(self, xvalues):
        """Evaluate the Gaussian on a line of Cartesian x coordinates.

        The input coordinates are shifted into a frame centred on the
        Gaussian before the standard normal-density formula is applied.

        Parameters
        ----------
        xvalues : np.ndarray
            The x coordinates in the original reference frame of the grid.
        """
        offsets = np.subtract(xvalues, self.centre)
        amplitude = np.divide(self.intensity, self.sigma * np.sqrt(2.0 * np.pi))
        return np.multiply(
            amplitude,
            np.exp(-0.5 * np.square(np.divide(offsets, self.sigma))),
        )


class Exponential(Profile):
    def __init__(
        self,
        centre=0.0,  # <- PyAutoFit reads these constructor arguments
        intensity=0.1,  # <- as the Exponential's free model parameters.
        rate=0.01,
    ):
        """1D Exponential profile, usable as a PyAutoFit model component whose
        parameters are fitted by a `NonLinearSearch`.

        Parameters
        ----------
        centre : float
            The x coordinate of the profile centre.
        intensity : float
            Overall intensity normalisation of the profile.
        rate : float
            The decay rate controlling how fast the Exponential declines.
        """
        super().__init__(centre=centre, intensity=intensity)
        self.rate = rate  # decay rate, specific to the Exponential

    def profile_from_xvalues(self, xvalues):
        """Evaluate the Exponential on a line of Cartesian x coordinates.

        The input coordinates are shifted into a frame centred on the
        Exponential; the profile decays symmetrically with |offset|.

        Parameters
        ----------
        xvalues : np.ndarray
            The x coordinates in the original reference frame of the grid.
        """
        offsets = np.subtract(xvalues, self.centre)
        return self.intensity * np.multiply(
            self.rate, np.exp(-1.0 * self.rate * abs(offsets))
        )
|
[
"[email protected]"
] | |
6a2a59d480d7535ce9790d74f76a9ff441a76d8a
|
8c5c74f6f0d19111f2873fcf7763ad1529110cb7
|
/Examples/game4.py
|
a514e08d8424e79f30572b4f0255928ac58bc963
|
[] |
no_license
|
lostboy1/cps-108
|
44ba4e4aa6e224f73b8b82ab91c2216bdb821026
|
4264567557ba772f1b5e62ce380cf540af40d5d3
|
refs/heads/master
| 2023-01-19T13:19:10.469778 | 2020-12-01T07:39:29 | 2020-12-01T07:39:29 | 288,768,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,161 |
py
|
from tkinter import Tk, Canvas, mainloop
SPEED = 10  # max pixels a missile moves per animation tick, per axis
root = Tk()
c = Canvas(root, width=500, height=500)
c.pack()
# Put drawing here!
# Static scene: sky, ground, tree, rainbow-style arcs, sun, and a grey bar.
c.create_rectangle(0, 0, 500, 300, fill='blue')
c.create_rectangle(0, 300, 500, 500, fill='yellow')
c.create_rectangle(347, 380, 353, 450, fill='white')
c.create_polygon(350, 360, 400, 400, 300, 400, fill='green')
c.create_oval(80, 320, 140, 380, fill='white')
c.create_oval(85, 320, 135, 380, fill='blue')
c.create_oval(90, 320, 130, 380, fill='red')
c.create_oval(95, 320, 125, 380, fill='white')
c.create_oval(100, 320, 120, 380, fill='blue')
c.create_oval(105, 320, 115, 380, fill='red')
c.create_oval(109, 320, 111, 380, fill='white')
c.create_oval(440, 0, 550, 110, fill='yellow')
c.create_rectangle(0, 0, 505, 50, fill='light grey')
# Two bird polygons; vertices 1 and 3 (indices 3 and 7 of the flat coord
# list) are the wing tips that animate() flips to simulate flapping.
birds = [
    c.create_polygon(300, 175, 335, 200, 300, 185, 265, 200, fill='white'),
    c.create_polygon(165, 125, 200, 150, 165, 135, 130, 150, fill='white'),
]
def animate():
    """Per-frame update: flap bird wings and step missiles toward targets."""
    # Make bird wings flap.
    if c.count % 5 == 0:  # only every 5th tick, so the flap is visible
        for bird in birds:
            b = c.coords(bird)
            yc = (b[1] + b[5]) / 2  # vertical midline of the bird body
            for i in 3, 7:  # y coords of the two wing-tip vertices
                yw = b[i]
                # Mirror each wing tip across the midline (+/- 20 px).
                if yw > yc:
                    b[i] = yc - 20
                else:
                    b[i] = yc + 20
            c.coords(bird, b)
    # Move missiles.
    for x, y, shape in missiles:
        coords = c.coords(shape)
        mx = coords[0]
        my = coords[1]
        # Step toward the stored target (x, y), clamped to SPEED per axis.
        dx = x - mx
        if dx > SPEED:
            dx = +SPEED
        elif dx < -SPEED:
            dx = -SPEED
        dy = y - my
        if dy > SPEED:
            dy = +SPEED
        elif dy < -SPEED:
            dy = -SPEED
        c.move(shape, dx, dy)
    # Re-arm the timer: ~24 fps (42 ms per frame).
    root.after(42, animate)
    c.count = c.count + 1
c.count = 0  # frame counter stored on the canvas object
missiles = []  # list of [target_x, target_y, canvas_item_id]
animate()
def launch_missile(event):
    """Mouse handler: spawn a missile aimed at the click position."""
    missile = c.create_polygon(
        250,450, 245,455, 245,480, 240,485, 240,495, 245,490, 245,480, 245,490,
        255,490, 255,480, 255,490, 260,495, 260,485, 255,480, 255,455,
        fill='white', outline='black', width=3,
    )
    missiles.append([event.x, event.y, missile])
c.bind('<Button-1>', launch_missile)
mainloop()
|
[
"[email protected]"
] | |
798b7d8a6302939a34469359265942e49b7adc81
|
9839b73a6c09ac8a110feb692ef0c001d93f8cbf
|
/examples/advanced/preprocessGapFill.py
|
fddd2531aa700c42c6ca76aff7b6d658df654398
|
[
"MIT"
] |
permissive
|
GeoMTMan/resistics
|
c253caa9a70295a462756625261f93349475908f
|
942afe45456f63657267020749d723f7eee89934
|
refs/heads/master
| 2020-08-01T10:25:12.535890 | 2019-09-25T23:36:39 | 2019-09-25T23:36:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,255 |
py
|
# Demonstration script: split an ATS recording into two sections with a gap,
# fill the gap with resistics' interpolation, and compare before/after plots.
from pathlib import Path
from resistics.project.projectIO import loadProject
projectPath = Path("preprocessProject")
proj = loadProject(projectPath)
proj.printInfo()
from resistics.utilities.utilsPlotter import plotOptionsTime, getPresentationFonts
plotOptions = plotOptionsTime(plotfonts=getPresentationFonts())
from resistics.ioHandlers.dataReaderATS import DataReaderATS
site1 = proj.getSiteData("site1")
readerATS = DataReaderATS(site1.getMeasurementTimePath("meas_2012-02-10_11-05-00"))
# headers of recording
headers = readerATS.getHeaders()
chanHeaders, chanMap = readerATS.getChanHeaders()
# separate out two datasets: 11:05-11:09 and 11:10-11:14, deliberately
# leaving a one-minute gap between them.
timeOriginal1 = readerATS.getPhysicalData(
    "2012-02-10 11:05:00", "2012-02-10 11:09:00", remaverage=False
)
timeOriginal2 = readerATS.getPhysicalData(
    "2012-02-10 11:10:00", "2012-02-10 11:14:00", remaverage=False
)
from resistics.ioHandlers.dataWriterInternal import DataWriterInternal
# create a new site holding the two gapped sections
proj.createSite("site1_gaps")
proj.refresh()
writer = DataWriterInternal()
writer.setOutPath(
    Path(proj.timePath, "site1_gaps", "meas_2012-02-10_11-05-00_section1")
)
writer.writeData(headers, chanHeaders, timeOriginal1, physical=True)
writer.setOutPath(
    Path(proj.timePath, "site1_gaps", "meas_2012-02-10_11-05-00_section2")
)
writer.writeData(headers, chanHeaders, timeOriginal2, physical=True)
from resistics.project.projectTime import viewTime
# now view time: original vs gapped site, low-pass filtered at 16 Hz
fig = viewTime(
    proj,
    "2012-02-10 11:05:00",
    "2012-02-10 11:14:00",
    sites=["site1", "site1_gaps"],
    filter={"lpfilt": 16},
    chans=["Ex", "Hy"],
    show=False,
    plotoptions=plotOptions,
)
fig.savefig(Path(proj.imagePath, "viewTimeGaps.png"))
from resistics.ioHandlers.dataReaderInternal import DataReaderInternal
# read both sections back from the gapped site
siteGaps = proj.getSiteData("site1_gaps")
readerSection1 = DataReaderInternal(
    siteGaps.getMeasurementTimePath("meas_2012-02-10_11-05-00_section1")
)
timeData1 = readerSection1.getPhysicalSamples(remaverage=False)
timeData1.printInfo()
readerSection2 = DataReaderInternal(
    siteGaps.getMeasurementTimePath("meas_2012-02-10_11-05-00_section2")
)
timeData2 = readerSection2.getPhysicalSamples(remaverage=False)
timeData2.printInfo()
from resistics.utilities.utilsInterp import fillGap
# interpolate across the missing minute to produce one continuous record
timeDataFilled = fillGap(timeData1, timeData2)
timeDataFilled.printInfo()
# 14 minutes of data at 4096 Hz
samplesToView = 14 * 60 * 4096
fig = timeDataFilled.view(sampleStop=samplesToView, chans=["Ex", "Hy"])
fig.savefig(Path(proj.imagePath, "timeDataFilled.png"))
# create a new site to write out to
proj.createSite("site1_filled")
proj.refresh()
# use channel headers from one of the datasets, stop date will be automatically amended
writer = DataWriterInternal()
writer.setOutPath(
    Path(proj.timePath, "site1_filled", "meas_2012-02-10_11-05-00_filled")
)
headers = readerSection1.getHeaders()
chanHeaders, chanMap = readerSection1.getChanHeaders()
writer.writeData(headers, chanHeaders, timeDataFilled, physical=True)
proj.refresh()
# now view time: original vs gap-filled site for a visual sanity check
fig = viewTime(
    proj,
    "2012-02-10 11:05:00",
    "2012-02-10 11:14:00",
    sites=["site1", "site1_filled"],
    filter={"lpfilt": 16},
    chans=["Ex", "Hy"],
    show=False,
    plotoptions=plotOptions,
)
fig.savefig(Path(proj.imagePath, "viewTimeGapsFilled.png"))
|
[
"[email protected]"
] | |
55a42b6ae85147bc6d3fec90d6653672efb28b4e
|
3ae38471ca4ff70e30d8eeb0508b9b0aab5e19a2
|
/web/models.py
|
c7e701ae4777b6eeb1dcdcbf6d7c130c9268d9b0
|
[] |
no_license
|
Hamidnet220/Chapar
|
5596d6b703aa4c01c1010f0067cde5a57c33e336
|
3654601f34f0c58a17813851448889ccbf2c1c90
|
refs/heads/master
| 2020-05-03T04:22:51.140098 | 2019-04-02T05:15:50 | 2019-04-02T05:15:50 | 178,419,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,502 |
py
|
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.admin import User
# Create your models here.
def empty_validate_event(value):
    """Model-field validator: reject values shorter than 3 characters.

    NOTE(review): despite the name and the error message, this rejects any
    value of length <= 2, not only empty ones — confirm that is intended.
    """
    if len(value)<=2:
        raise ValidationError(("You can not leave this field empty!"),params={'value':value},)
class Organization(models.Model):
    """An external organisation that letters are received from / sent to."""
    # User who created the record; deleting the user cascades to the record.
    add_by_usr = models.ForeignKey(User,on_delete=models.CASCADE,related_name='add_by_usr_org')
    title = models.CharField(max_length = 150,validators=[empty_validate_event])
    tel = models.CharField(max_length = 19,blank=True)
    fax = models.CharField(max_length = 19,blank=True)
    # Soft delete: rows are flagged, never physically removed.
    is_deleted = models.BooleanField(default=False)
    # User who flagged the record as deleted; kept NULL if the user is removed.
    del_by_usr = models.ForeignKey(User,blank=True,null=True,on_delete=models.SET_NULL,related_name='del_by_usr_org')
    def __str__(self):
        return self.title
class Recive(models.Model):
    """An incoming letter (class name "Recive" is kept as-is — it is the
    public model name referenced elsewhere)."""
    add_by_usr = models.ForeignKey(User,on_delete=models.CASCADE,related_name='add_by_usr')
    # Sender organisation; kept NULL if the organisation is removed.
    organization = models.ForeignKey(Organization,on_delete=models.SET_NULL,null=True)
    title = models.CharField(max_length = 150,validators=[empty_validate_event])
    summery = models.TextField(blank=True)
    recive_date = models.DateTimeField()
    recive_number= models.CharField(max_length = 150)
    description = models.TextField(blank=True,null=True)
    # Scanned document, stored under MEDIA_ROOT/recives.
    recive_file = models.FileField(upload_to='recives')
    # Soft delete, same convention as Organization.
    is_deleted = models.BooleanField(default=False)
    del_by_usr = models.ForeignKey(User,blank=True,null=True,on_delete=models.SET_NULL,related_name='del_by_usr')
    def __str__(self):
        return "{}-{}".format(self.title,self.recive_date)
class Send(models.Model):
    """An outgoing letter; mirrors the structure of Recive."""
    add_by_usr = models.ForeignKey(User,on_delete=models.CASCADE,related_name='add_by_usr_send')
    # Recipient organisation; kept NULL if the organisation is removed.
    organization = models.ForeignKey(Organization,on_delete=models.SET_NULL,null=True)
    title = models.CharField(max_length = 150,validators=[empty_validate_event])
    summery = models.TextField(blank=True)
    send_date = models.DateTimeField()
    send_number = models.CharField(max_length = 150)
    description = models.TextField(blank=True,null=True)
    # Scanned document, stored under MEDIA_ROOT/sends.
    send_file = models.FileField(upload_to='sends')
    # Soft delete, same convention as Organization.
    is_deleted = models.BooleanField(default=False)
    del_by_usr = models.ForeignKey(User,blank=True,null=True,on_delete=models.SET_NULL,related_name='del_by_usr_send')
    def __str__(self):
        return "{}-{}".format(self.title,self.send_date)
|
[
"[email protected]"
] | |
9a18da4e47bb28850eb94b19cf46de0c7858bff1
|
008c065391d766fec2f2af252dd8a5e9bf5cb815
|
/Compress the Lis.py
|
177600548b994ba7e7b61511e9f4aed3f2340214
|
[] |
no_license
|
22Rahul22/Codechef
|
b261ab43ff5ff64648a75ad1195e33cac2cfec52
|
1f645c779a250a71d75598e1eabad7e52dd6b031
|
refs/heads/master
| 2022-11-29T21:51:09.578798 | 2020-08-19T06:20:23 | 2020-08-19T06:20:23 | 288,650,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 625 |
py
|
# Compress consecutive ascending runs in a list: runs of 3 or more
# consecutive integers print as "first...last"; shorter runs print each
# element, all comma-separated. One test case per line pair (n, then list).
t = int(input())
for _ in range(t):
    n = int(input())
    arr = list(map(int, input().split()))
    c = 0   # length (in steps) of the current consecutive run
    a = []  # flat list of (run start index, run end index) pairs
    k = 0   # NOTE(review): assigned but never used
    s = ""
    j = 0   # read cursor into `a`, advanced two at a time
    # Sentinel so the final run is always terminated inside the loop
    # (assumes list elements are positive — TODO confirm problem constraints).
    arr.append(0)
    for i in range(n):
        if arr[i] + 1 == arr[i + 1]:
            c += 1
        else:
            if c >= 2:
                # Run of >= 3 consecutive values: emit "start...end,"
                a.append(i - c)
                a.append(i)
                s += str(arr[a[j]]) + "..." + str(arr[a[j+1]])+","
                j += 2
            elif c == 1:
                # Exactly two consecutive values: emit both individually.
                s += str(arr[i-1]) + "," + str(arr[i])+","
            else:
                s += str(arr[i])+","
            c = 0
    # Drop the trailing comma before printing.
    s = s[:len(s)-1]
    print(s)
|
[
"[email protected]"
] | |
2af7d3345e7878e5745045fa9d0cc15efba802d3
|
8e7e9aaf06fed4e5be52f61462a40539c55d0f76
|
/Chapter03/wifi_lookup.py
|
191b4060e6064134d820b48c18240f5a011382c0
|
[] |
no_license
|
CodedQuen/Python-Digital-Forensics-Cookbookk
|
2809ed1680958250a139c22f8a33b5512b608d98
|
f69e56d4f2f88e71a74dc538c7b3a934ee014369
|
refs/heads/master
| 2022-06-06T10:10:36.288517 | 2020-05-04T03:06:39 | 2020-05-04T03:06:39 | 261,067,063 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,612 |
py
|
from __future__ import print_function
import argparse
import csv
import os
import sys
import xml.etree.ElementTree as ET
import requests
"""
MIT License
Copyright (c) 2017 Chapin Bryce, Preston Miller
Please share comments and questions at:
https://github.com/PythonForensics/PythonForensicsCookbook
or email [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__authors__ = ["Chapin Bryce", "Preston Miller"]
__date__ = 20170815
__description__ = "Wifi MAC Address lookup utility"
def main(in_file, out_csv, type, api_key):
    """Parse WiFi MACs from the input, enrich them via Wigle, write a CSV.

    `type` is 'xml' (Cellebrite report) or 'txt' (plain MAC list); the name
    shadows the builtin but is kept for interface compatibility with the
    argparse wiring at the bottom of the file.
    """
    if type == 'xml':
        wifi = parse_xml(in_file)
    else:
        wifi = parse_txt(in_file)
    query_wigle(wifi, out_csv, api_key)
def parse_xml(xml_file):
    """Extract connected-WiFi BSSIDs from a Cellebrite 2.0 XML report.

    Returns a dict keyed by BSSID, each value holding parallel lists of
    connection "Timestamps" and "SSID" names plus an empty "Wigle" slot to
    be filled by query_wigle().
    """
    wifi = {}
    # Namespace prefix used by Cellebrite 2.0 report elements.
    xmlns = "{http://pa.cellebrite.com/report/2.0}"
    print("[+] Opening {} report".format(xml_file))
    xml_tree = ET.parse(xml_file)
    print("[+] Parsing report for all connected WiFi addresses")
    root = xml_tree.getroot()
    for child in root.iter():
        # Only "Location" model elements carry the WiFi connection records.
        if child.tag == xmlns + "model":
            if child.get("type") == "Location":
                for field in child.findall(xmlns + "field"):
                    if field.get("name") == "TimeStamp":
                        ts_value = field.find(xmlns + "value")
                        try:
                            ts = ts_value.text
                        except AttributeError:
                            # Missing <value> element: skip this field.
                            continue
                    if field.get("name") == "Description":
                        value = field.find(xmlns + "value")
                        try:
                            value_text = value.text
                        except AttributeError:
                            continue
                        # Description format appears to be
                        # "BSSID: <mac>\tSSID: <name>" — TODO confirm against
                        # an actual Cellebrite export.
                        if "SSID" in value.text:
                            bssid, ssid = value.text.split("\t")
                            bssid = bssid[7:]  # strip the 7-char "BSSID: " prefix
                            ssid = ssid[6:]    # strip the 6-char "SSID: " prefix
                            if bssid in wifi.keys():
                                wifi[bssid]["Timestamps"].append(ts)
                                wifi[bssid]["SSID"].append(ssid)
                            else:
                                wifi[bssid] = {
                                    "Timestamps": [ts], "SSID": [ssid],
                                    "Wigle": {}}
    return wifi
def parse_txt(txt_file):
    """Build the per-BSSID lookup dict from a plain text file of MACs.

    Each line of the file is treated as one MAC address; every entry gets
    placeholder "Timestamps"/"SSID" values and an empty "Wigle" slot, so the
    result has the same shape as parse_xml()'s output.
    """
    print("[+] Extracting MAC addresses from {}".format(txt_file))
    with open(txt_file) as mac_file:
        return {
            line.strip(): {"Timestamps": ["N/A"], "SSID": ["N/A"], "Wigle": {}}
            for line in mac_file
        }
def query_mac_addr(mac_addr, api_key):
    """Query the Wigle v2 network/search API for one MAC address.

    `api_key` is a (name, token) pair used for HTTP basic auth. Returns the
    decoded JSON response; raises requests/JSON errors on network failure.
    """
    query_url = "https://api.wigle.net/api/v2/network/search?" \
                "onlymine=false&freenet=false&paynet=false" \
                "&netid={}".format(mac_addr)
    req = requests.get(query_url, auth=(api_key[0], api_key[1]))
    return req.json()
def query_wigle(wifi_dictionary, out_csv, api_key):
    """Enrich each BSSID entry with Wigle results, then write the CSV.

    Mutates `wifi_dictionary` in place: on success the full Wigle response
    is stored under "Wigle"; on zero hits, rate-limiting, or other API
    errors an empty "results" list is stored so prep_output can proceed.
    """
    print("[+] Querying Wigle.net through Python API for {} "
          "APs".format(len(wifi_dictionary)))
    for mac in wifi_dictionary:
        wigle_results = query_mac_addr(mac, api_key)
        try:
            if wigle_results["resultCount"] == 0:
                wifi_dictionary[mac]["Wigle"]["results"] = []
                continue
            else:
                wifi_dictionary[mac]["Wigle"] = wigle_results
        except KeyError:
            # No "resultCount" key means the API returned an error payload.
            if wigle_results["error"] == "too many queries today":
                print("[-] Wigle daily query limit exceeded")
                wifi_dictionary[mac]["Wigle"]["results"] = []
                continue
            else:
                print("[-] Other error encountered for "
                      "address {}: {}".format(mac, wigle_results['error']))
                wifi_dictionary[mac]["Wigle"]["results"] = []
                continue
    prep_output(out_csv, wifi_dictionary)
def prep_output(output, data):
    """Flatten the nested {mac: {timestamps, ssids, wigle results}} structure
    into one CSV row per (mac, timestamp, wigle-result) combination, adding a
    Google Maps URL built from each result's lat/long, then write the CSV.
    """
    csv_data = {}
    google_map = "https://www.google.com/maps/search/"
    for x, mac in enumerate(data):
        for y, ts in enumerate(data[mac]["Timestamps"]):
            for z, result in enumerate(data[mac]["Wigle"]["results"]):
                shortres = data[mac]["Wigle"]["results"][z]
                g_map_url = "{}{},{}".format(
                    google_map, shortres["trilat"], shortres["trilong"])
                # Key "x-y-z" encodes the index triple; dict-merge gives the
                # Wigle fields plus our computed columns in a single row.
                csv_data["{}-{}-{}".format(x, y, z)] = {
                    **{
                        "BSSID": mac, "SSID": data[mac]["SSID"][y],
                        "Cellebrite Connection Time": ts,
                        "Google Map URL": g_map_url},
                    **shortres
                }
    write_csv(output, csv_data)
def write_csv(output, data):
    """Write the flattened rows to *output* as a CSV file.

    The header is the sorted union of every row's keys; rows missing a
    column are written with an empty cell, and any unexpected extra keys
    are ignored via DictWriter's extrasaction setting.
    """
    print("[+] Writing data to {}".format(output))
    # Union of all keys across rows -> complete, deterministic header.
    columns = {field for row in data.values() for field in row}
    with open(output, "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=sorted(columns),
                                extrasaction='ignore')
        writer.writeheader()
        for row in data.values():
            writer.writerow(row)
if __name__ == "__main__":
# Command-line Argument Parser
parser = argparse.ArgumentParser(
description=__description__,
epilog="Developed by {} on {}".format(
", ".join(__authors__), __date__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("INPUT_FILE", help="INPUT FILE with MAC Addresses")
parser.add_argument("OUTPUT_CSV", help="Output CSV File")
parser.add_argument(
"-t", help="Input type: Cellebrite XML report or TXT file",
choices=('xml', 'txt'), default="xml")
parser.add_argument('--api', help="Path to API key file",
default=os.path.expanduser("~/.wigle_api"),
type=argparse.FileType('r'))
args = parser.parse_args()
if not os.path.exists(args.INPUT_FILE) or \
not os.path.isfile(args.INPUT_FILE):
print("[-] {} does not exist or is not a file".format(
args.INPUT_FILE))
sys.exit(1)
directory = os.path.dirname(args.OUTPUT_CSV)
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
api_key = args.api.readline().strip().split(":")
main(args.INPUT_FILE, args.OUTPUT_CSV, args.t, api_key)
|
[
"[email protected]"
] | |
be166e9147de70c62d3f58a25394cfa3dbf8ba87
|
6469e5689c5888481a5eb1c2d37e057c42e2afc3
|
/biolink/api/link/endpoints/associations_from.py
|
068338308ebe6c4ba78cebb9d5f045be2ad9c4f7
|
[] |
no_license
|
othreecodes/biolink-api
|
66e2b171da0f112f124c5adc8bf2a3a23d78ce1a
|
37761acba24ff5045fb65b16b009fdaa0fafa03e
|
refs/heads/master
| 2021-05-09T00:34:32.199397 | 2018-01-31T22:10:08 | 2018-01-31T22:10:08 | 119,743,095 | 2 | 0 | null | 2018-01-31T21:03:53 | 2018-01-31T21:03:52 | null |
UTF-8
|
Python
| false | false | 3,655 |
py
|
import logging
from flask import request
from flask_restplus import Resource
from biolink.datamodel.serializers import association, association_results
from biolink.api.restplus import api
from ontobio.golr.golr_associations import get_association, search_associations
import pysolr
log = logging.getLogger(__name__)
ns = api.namespace('association', description='Retrieve associations between entities')
parser = api.parser()
parser.add_argument('subject_taxon', help='SUBJECT TAXON id, e.g. NCBITaxon:9606. Includes inferences by default (higher level taxa can be used)')
parser.add_argument('evidence', help="""Object id, e.g. ECO:0000501 (for IEA; Includes inferred by default)
or a specific publication or other supporting ibject, e.g. ZFIN:ZDB-PUB-060503-2.
""")
parser.add_argument('graphize', type=bool, help='If set, includes graph object in response')
parser.add_argument('fl_excludes_evidence', type=bool, help='If set, excludes evidence objects in response')
parser.add_argument('page', type=int, required=False, default=1, help='Return results starting with this row number')
parser.add_argument('rows', type=int, required=False, default=10, help='limit on number of rows')
parser.add_argument('map_identifiers', help='Prefix to map all IDs to. E.g. NCBIGene, HP, OMIM, DOID')
parser.add_argument('subject_category', help='e.g. gene, genotype, disease, function (todo: use enum)')
parser.add_argument('object_category', help='e.g. disease, phenotype, gene')
parser.add_argument('slim', action='append', help='Map objects up (slim) to a higher level category. Value can be ontology class ID or subset ID')
parser.add_argument('use_compact_associations', type=bool, help='If true, returns results in compact associations format')
@ns.route('/from/<subject>')
@api.doc(params={'subject': 'Return associations emanating from this node, e.g. specifying NCBIGene:84570 will return gene-phenotype, gene-function etc for this gene'})
class AssociationsFrom(Resource):
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self, subject):
"""
Returns list of matching associations starting from a given subject (source)
"""
args = parser.parse_args()
return search_associations(subject=subject, **args)
@ns.route('/to/<object>')
@api.doc(params={'object': 'Return associations pointing to this node. E.g. specifying MP:0013765 will return all genes, variants, strains etc annotated with this term. Can also be a biological entity such as a gene'})
class AssociationsTo(Resource):
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self, object):
"""
Returns list of matching associations pointing to a given object (target)
"""
args = parser.parse_args()
return search_associations(object=object, **args)
@ns.route('/between/<subject>/<object>')
@api.doc(params={'subject': 'E.g. e.g. MGI:1342287'})
@api.doc(params={'object': 'E.g. e.g. MP:0013765, can also be a biological entity such as a gene'})
class AssociationsBetween(Resource):
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self, subject, object):
"""
Returns associations connecting two entities
Given two entities (e.g. a particular gene and a particular disease), if these two entities
are connected (directly or indirectly), then return the association objects describing
the connection.
"""
args = parser.parse_args()
return search_associations(object=object, **args)
|
[
"[email protected]"
] | |
d6e53802abe18fd58b79466f4e5b581f8311cc8f
|
b864b992187e2e1c5c8da6fdabeeab5040058fe9
|
/Python Example/python 100 examples/064.py
|
5d0b0242ccab80007fa668c36eec6b037ee30270
|
[] |
no_license
|
Mr-Phoebe/ProgramLanguage
|
5384afeef20c8a12cd89cf3720beb0337bd38fc9
|
1588aea62e15304339efb73d55653be1b4e57156
|
refs/heads/master
| 2023-02-06T11:59:06.272680 | 2023-02-06T04:00:14 | 2023-02-06T04:00:14 | 65,252,634 | 52 | 37 | null | null | null | null |
GB18030
|
Python
| false | false | 630 |
py
|
# -*- coding: UTF-8 -*-
'''
题目:利用ellipse and rectangle 画图。
1.程序分析:
2.程序源代码:
'''
if __name__ == '__main__':
from Tkinter import *
canvas = Canvas(width = 400,height = 600,bg = 'white')
left = 20
right = 50
top = 50
num = 15
for i in range(num):
canvas.create_oval(250 - right,250 - left,250 + right,250 + left)
canvas.create_oval(250 - 20,250 - top,250 + 20,250 + top)
canvas.create_rectangle(20 - 2 * i,20 - 2 * i,10 * (i + 2),10 * ( i + 2))
right += 5
left += 5
top += 10
canvas.pack()
mainloop()
|
[
"[email protected]"
] | |
84d1b66a1d65710dbf72630462b771d0caabbd2d
|
5a1e5603a42ff27e648fad307d60957cb95f0185
|
/dask/dataframe/tests/test_csv.py
|
5e6550be7b23c583897a4f98ca267e45c276d05a
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
jseabold/dask
|
1937931e7951f776b253432f6b5beedee90892a0
|
f6332dec1ead4034540bc2c3c1010a9783099752
|
refs/heads/master
| 2021-01-23T04:23:10.852740 | 2016-04-29T00:14:34 | 2016-04-29T00:14:34 | 57,442,996 | 0 | 0 | null | 2016-04-30T13:29:31 | 2016-04-30T13:29:30 |
Python
|
UTF-8
|
Python
| false | false | 6,358 |
py
|
from __future__ import print_function, division, absolute_import
from io import BytesIO
import pytest
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
from toolz import partition_all, valmap, partial
from dask import compute
from dask.async import get_sync
from dask.dataframe.csv import read_csv_from_bytes, bytes_read_csv, read_csv
from dask.dataframe.utils import eq
from dask.utils import filetexts, filetext
compute = partial(compute, get=get_sync)
files = {'2014-01-01.csv': (b'name,amount,id\n'
b'Alice,100,1\n'
b'Bob,200,2\n'
b'Charlie,300,3\n'),
'2014-01-02.csv': (b'name,amount,id\n'),
'2014-01-03.csv': (b'name,amount,id\n'
b'Dennis,400,4\n'
b'Edith,500,5\n'
b'Frank,600,6\n')}
header = files['2014-01-01.csv'].split(b'\n')[0] + b'\n'
expected = pd.concat([pd.read_csv(BytesIO(files[k])) for k in sorted(files)])
def test_bytes_read_csv():
b = files['2014-01-01.csv']
df = bytes_read_csv(b, b'', {})
assert list(df.columns) == ['name', 'amount', 'id']
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
def test_bytes_read_csv_kwargs():
b = files['2014-01-01.csv']
df = bytes_read_csv(b, b'', {'usecols': ['name', 'id']})
assert list(df.columns) == ['name', 'id']
def test_bytes_read_csv_dtype_coercion():
b = files['2014-01-01.csv']
df = bytes_read_csv(b, b'', {}, {'amount': 'float'})
assert df.amount.dtype == 'float'
def test_bytes_read_csv_with_header():
b = files['2014-01-01.csv']
header, b = b.split(b'\n', 1)
header = header + b'\n'
df = bytes_read_csv(b, header, {})
assert list(df.columns) == ['name', 'amount', 'id']
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
def test_read_csv_simple():
blocks = [[files[k]] for k in sorted(files)]
kwargs = {}
head = bytes_read_csv(files['2014-01-01.csv'], b'', {})
df = read_csv_from_bytes(blocks, header, head, kwargs, collection=True)
assert isinstance(df, dd.DataFrame)
assert list(df.columns) == ['name', 'amount', 'id']
values = read_csv_from_bytes(blocks, header, head, kwargs,
collection=False)
assert isinstance(values, list)
assert len(values) == 3
assert all(hasattr(item, 'dask') for item in values)
result = df.amount.sum().compute(get=get_sync)
assert result == (100 + 200 + 300 + 400 + 500 + 600)
def test_kwargs():
blocks = [files[k] for k in sorted(files)]
blocks = [[b] for b in blocks]
kwargs = {'usecols': ['name', 'id']}
head = bytes_read_csv(files['2014-01-01.csv'], b'', kwargs)
df = read_csv_from_bytes(blocks, header, head, kwargs, collection=True)
assert list(df.columns) == ['name', 'id']
result = df.compute()
assert (result.columns == df.columns).all()
def test_blocked():
blocks = []
for k in sorted(files):
b = files[k]
lines = b.split(b'\n')
blocks.append([b'\n'.join(bs) for bs in partition_all(2, lines)])
df = read_csv_from_bytes(blocks, header, expected.head(), {})
eq(df.compute().reset_index(drop=True),
expected.reset_index(drop=True), check_dtype=False)
expected2 = expected[['name', 'id']]
df = read_csv_from_bytes(blocks, header, expected2.head(),
{'usecols': ['name', 'id']})
eq(df.compute().reset_index(drop=True),
expected2.reset_index(drop=True), check_dtype=False)
def test_enforce_dtypes():
blocks = [[b'aa,bb\n1,1.0\n2.2.0', b'10,20\n30,40'],
[b'aa,bb\n1,1.0\n2.2.0', b'10,20\n30,40']]
head = pd.read_csv(BytesIO(blocks[0][0]), header=0)
dfs = read_csv_from_bytes(blocks, b'aa,bb\n', head, {},
enforce_dtypes=True, collection=False)
dfs = compute(*dfs)
assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)
def test_read_csv_files():
with filetexts(files, mode='b'):
df = read_csv('2014-01-*.csv')
eq(df, expected, check_dtype=False)
fn = '2014-01-01.csv'
df = read_csv(fn)
expected2 = pd.read_csv(BytesIO(files[fn]))
eq(df, expected2, check_dtype=False)
from dask.bytes.compression import compress, files as cfiles, seekable_files
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10) for fmt in seekable_files]
@pytest.mark.parametrize('fmt,blocksize', fmt_bs)
def test_read_csv_compression(fmt, blocksize):
files2 = valmap(compress[fmt], files)
with filetexts(files2, mode='b'):
df = read_csv('2014-01-*.csv', compression=fmt, blocksize=blocksize)
eq(df.compute(get=get_sync).reset_index(drop=True),
expected.reset_index(drop=True), check_dtype=False)
def test_warn_non_seekable_files(capsys):
files2 = valmap(compress['gzip'], files)
with filetexts(files2, mode='b'):
df = read_csv('2014-01-*.csv', compression='gzip')
assert df.npartitions == 3
out, err = capsys.readouterr()
assert 'gzip' in err
assert 'blocksize=None' in err
df = read_csv('2014-01-*.csv', compression='gzip', blocksize=None)
out, err = capsys.readouterr()
assert not err and not out
with pytest.raises(NotImplementedError):
df = read_csv('2014-01-*.csv', compression='foo')
def test_windows_line_terminator():
text = 'a,b\r\n1,2\r\n2,3\r\n3,4\r\n4,5\r\n5,6\r\n6,7'
with filetext(text) as fn:
df = read_csv(fn, blocksize=5, lineterminator='\r\n')
assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7
assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5 + 6
def test_late_dtypes():
text = 'a,b\n1,2\n2,3\n3,4\n4,5\n5.5,6\n6,7.5'
with filetext(text) as fn:
df = read_csv(fn, blocksize=5, sample=10)
try:
df.b.sum().compute()
assert False
except TypeError as e:
assert ("'b': float" in str(e) or
"'a': float" in str(e))
df = read_csv(fn, blocksize=5, sample=10,
dtype={'a': float, 'b': float})
assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5.5 + 6
assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7.5
|
[
"[email protected]"
] | |
8c69b04818eb1c529b6ad11ac1a9de153b213ba5
|
68747ba592c252c952823ff4973c9508b7c8c5e9
|
/Ensemble/BeamVelocity.py
|
dfcd6b48965024dc062ad756bb54688fbce1a739
|
[] |
no_license
|
ricorx7/rti_python-1
|
50ce01e7acf60ad6d57c26cfe5d79ecd1fc84563
|
384edef9c14ae5296d7e123eec473b29905a8a58
|
refs/heads/master
| 2023-02-01T04:33:48.585793 | 2020-12-16T23:25:22 | 2020-12-16T23:25:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,649 |
py
|
from rti_python.Ensemble.Ensemble import Ensemble
import logging
class BeamVelocity:
"""
Beam Velocity DataSet.
[Bin x Beam] data.
"""
def __init__(self, num_elements, element_multiplier):
"""
Beam Velocity data.
:param num_elements: Number of bins
:param element_multiplier: Number of beams.
"""
self.ds_type = 10
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000001\0"
self.Velocities = []
# Create enough entries for all the (bins x beams)
# Initialize with bad values
for bins in range(num_elements):
bins = []
for beams in range(element_multiplier):
bins.append([Ensemble().BadVelocity])
self.Velocities.append(bins)
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the velocities.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
self.Velocities[bin_num][beam] = Ensemble.GetFloat(packet_pointer, Ensemble().BytesInFloat, data)
packet_pointer += Ensemble().BytesInFloat
logging.debug(self.Velocities)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
val = self.Velocities[bin_num][beam]
result += Ensemble.float_to_bytes(val)
return result
def encode_csv(self, dt, ss_code, ss_config, blank, bin_size):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or first bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
# Get the value
val = self.Velocities[bin_num][beam]
# Create the CSV string
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_BEAM_VEL, ss_code, ss_config, bin_num, beam, blank, bin_size, val))
return str_result
def pd0_mm_per_sec(self, pd0_beam_num: int):
"""
Convert the Beam Velocity from m/s to mm/s and as an integer.
Also remap the Beam numbers to match PD0 beams.
RTB and PD0 do not share the same Beam Order
RTB BEAM 0,1,2,3 = PD0 BEAM 3,2,0,1
:param pd0_beam_num: PD0 Beam number.
:type pd0_beam_num: Integer
:return: A list of all the velocities for the given PD0 beam, converted to mm/s for the beam. The beam will be based on reordering for PD0
:rtype: List or None if beam number is not correct.
"""
# Remap the beam number
# beam order 3,2,0,1
rti_beam_num = 0
if self.element_multiplier == 1: # Vertical beam
rti_beam_num = 0
elif pd0_beam_num == 0:
rti_beam_num = 2
elif pd0_beam_num == 1:
rti_beam_num = 3
elif pd0_beam_num == 2:
rti_beam_num = 1
elif pd0_beam_num == 3:
rti_beam_num = 0
# Replace the RTB BAD_Velocity (88.888) to PD0 BAD_VELOCITY (-32768)
pd0_vel_data = []
for bin_idx in range(self.num_elements):
if Ensemble.is_bad_velocity(self.Velocities[bin_idx][rti_beam_num]):
pd0_vel_data.append(-32768)
else:
pd0_vel_data.append(round(self.Velocities[bin_idx][rti_beam_num] * 1000.0)) # Convert to mm/s and integer
return pd0_vel_data
|
[
"[email protected]"
] | |
b35b6265d2f87725bbf04c7a1a015b18573508d8
|
db7b618e7d9f7c2af001678e7bc7dd703cb86e61
|
/TelegramBot/settings.py
|
5bcca8b1e223f39bd5a3ac86c3f826827b3c5348
|
[] |
no_license
|
abdullakn/impress.ai
|
6f24403b70130d8b6440ceab22931cd1cdcb9aeb
|
c9033970d33304a306fd6dd5e8cc9c1e39ddf1d8
|
refs/heads/master
| 2023-08-10T18:07:19.462283 | 2021-09-29T17:27:52 | 2021-09-29T17:27:52 | 411,642,025 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,388 |
py
|
"""
Django settings for TelegramBot project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$iep-wj!47lovz7ui4i27t7e5c8d9o$pnmw#@l27sb_t-c5pig'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
TOKEN = config('TOKEN')
TELEGRAM = {
'bot_token': TOKEN,
'channel_name': 'Impress',
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'telegramApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TelegramBot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TelegramBot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME':'impressDB',
'USER':'postgres',
'PASSWORD':'Abdulla@123',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
|
[
"[email protected]"
] | |
e4542b8b5cbb4bef96ff785702f56111f3fe58f4
|
768058e7f347231e06a28879922690c0b6870ed4
|
/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/commands/help.py
|
386278cf5f1461bd429c082c29df90b3b868a0cf
|
[] |
no_license
|
jciech/HeisenbergSpinChains
|
58b4238281d8c158b11c6c22dd0da82025fd7284
|
e43942bbd09f6675e7e2ff277f8930dc0518d08e
|
refs/heads/master
| 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
from __future__ import absolute_import
from pip._internal.basecommand import SUCCESS, Command
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = "help"
usage = """
%prog <command>"""
summary = "Show help for commands."
ignore_require_venv = True
def run(self, options, args):
from pip._internal.commands import commands_dict, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(" - ".join(msg))
command = commands_dict[cmd_name]()
command.parser.print_help()
return SUCCESS
|
[
"[email protected]"
] | |
6383c420b4d765598ded8fa8b7e09a41780ee859
|
5761eca23af5ad071a9b15e2052958f2c9de60c0
|
/generated-stubs/allauth/socialaccount/providers/weixin/views.pyi
|
ab4087168efbf7f077d1dc53cf0dcb35eb434d7a
|
[] |
no_license
|
d-kimuson/drf-iframe-token-example
|
3ed68aa4463531f0bc416fa66d22ee2aaf72b199
|
dd4a1ce8e38de9e2bf90455e3d0842a6760ce05b
|
refs/heads/master
| 2023-03-16T13:52:45.596818 | 2021-03-09T22:09:49 | 2021-03-09T22:09:49 | 346,156,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
pyi
|
from .client import WeixinOAuth2Client as WeixinOAuth2Client
from .provider import WeixinProvider as WeixinProvider
from allauth.account import app_settings as app_settings
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter as OAuth2Adapter, OAuth2CallbackView as OAuth2CallbackView, OAuth2LoginView as OAuth2LoginView
from allauth.utils import build_absolute_uri as build_absolute_uri
from typing import Any
class WeixinOAuth2Adapter(OAuth2Adapter):
provider_id: Any = ...
access_token_url: str = ...
profile_url: str = ...
@property
def authorize_url(self): ...
def complete_login(self, request: Any, app: Any, token: Any, **kwargs: Any): ...
class WeixinOAuth2ClientMixin:
def get_client(self, request: Any, app: Any): ...
class WeixinOAuth2LoginView(WeixinOAuth2ClientMixin, OAuth2LoginView): ...
class WeixinOAuth2CallbackView(WeixinOAuth2ClientMixin, OAuth2CallbackView): ...
oauth2_login: Any
oauth2_callback: Any
|
[
"[email protected]"
] | |
5ac9b4d7308eaba4eff0b9657389f4c3652b5b94
|
ebdeaa70f6e30abab03a1589bcdd56d1339151ef
|
/day14Python对象3/02-添加子类属性.py
|
e7ac08d531ca166a266198f0171a8931da24f600
|
[] |
no_license
|
gilgameshzzz/learn
|
490d8eb408d064473fdbfa3f1f854c2f163a7ef6
|
d476af77a6163ef4f273087582cbecd7f2ec15e6
|
refs/heads/master
| 2020-03-31T11:32:42.909453 | 2018-11-22T03:34:45 | 2018-11-22T03:34:45 | 152,181,143 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,547 |
py
|
# Filename : 02-添加子类属性.py
# Date : 2018/8/2
"""
对象属性的继承:是通过继承init方法来继承的对象属性
给当前类添加对象属性:重写init方法,如果需要保留父类的对象属性,需要使用
super()去调用父类的init方法
多态:同一个事物有多种形态,子类继承父类的方法,可以对方法进行重写,
一个方法就有多种形态(多态的表现)
类的多态:继承产生多态
"""
class Person:
def __init__(self, name='', age=2):
self.name = name
self.age = age
class Staff(Person):
# init方法的参数:保证在创建对象的时候就可以给某些属性赋值
def __init__(self, name):
super().__init__(name)
self.salary = 0
if __name__ == '__main__':
s1 = Person()
s1.__init__('wd', 12)
print(s1.name, s1.age)
# 练习
"""
声明人类,有属性,名字、年龄、性别。身高
要求创建人的对象的时候可以给名字、性别、年龄赋初值
再创建学生类继承自人类,拥有人类的所有的属性,再添加学号、
成绩、电话属性
要求创建学生对象的时候可以给名字、年龄和电话赋初值
"""
class Human:
def __init__(self, name, age=0, sex='男'):
self.name = name
self.height = 0
self.age = age
self.sex = sex
class Student(Human):
def __init__(self, name, age, tel):
super().__init__(self, name, age)
self.score = 0
self.id_num = 0
self.tel = 13
|
[
"[email protected]"
] | |
2d1003eb12e4578cbb09e2a2b23226e356bffd3e
|
80c8d4e84f2ea188a375ff920a4adbd9edaed3a1
|
/bigdata_study/pyflink1.x/pyflink_learn/examples/4_window/sink_monitor.py
|
f9435ee7aaed197828b8fafad6f66d9fa6cace97
|
[
"MIT"
] |
permissive
|
Birkid/penter
|
3a4b67801d366db15ca887c31f545c8cda2b0766
|
0200f40c9d01a84c758ddcb6a9c84871d6f628c0
|
refs/heads/master
| 2023-08-22T14:05:43.106499 | 2021-10-20T07:10:10 | 2021-10-20T07:10:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,191 |
py
|
"""
读取 kafka 的用户操作数据并打印
"""
from kafka import KafkaConsumer
from reprint import output
import json
topic = 'click_rank'
bootstrap_servers = ['localhost:9092']
group_id = 'group7'
consumer = KafkaConsumer(
topic, # topic的名称
group_id=group_id, # 指定此消费者实例属于的组名,可以不指定
bootstrap_servers=bootstrap_servers, # 指定kafka服务器
auto_offset_reset='latest', # 'smallest': 'earliest', 'largest': 'latest'
)
with output(output_type="list", initial_len=22, interval=0) as output_lines:
# 初始化打印行
output_lines[0] = '=== 男 ==='
output_lines[6] = '=== 女 ==='
for msg in consumer:
# 解析结果
data = json.loads(msg.value)
start_index = 1 if data['sex'] == '男' else 7
rank = json.loads('[' + data['top10'] + ']')
# 逐行打印
for i in range(5):
index = start_index + i
if i < len(rank):
name = list(rank[i].keys())[0]
value = list(rank[i].values())[0]
output_lines[index] = f'{name:6s} {value}'
else:
output_lines[index] = ''
|
[
"[email protected]"
] | |
25a39bfe0961decc5e8a5665dfe83a66b05dbd27
|
18430833920b3193d2f26ed526ca8f6d7e3df4c8
|
/src/notifications/context_processors.py
|
f80de60ee43e53ffde101052edf945953ac0c19e
|
[
"MIT"
] |
permissive
|
providenz/phase
|
ed8b48ea51d4b359f8012e603b328adf13d5e535
|
b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60
|
refs/heads/master
| 2021-01-17T06:56:07.842719 | 2016-06-28T11:17:53 | 2016-06-28T11:17:53 | 47,676,508 | 0 | 0 | null | 2015-12-09T07:45:19 | 2015-12-09T07:45:18 | null |
UTF-8
|
Python
| false | false | 991 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
from notifications.models import Notification
def notifications(request):
"""Fetches data required to render navigation menu.
The main menu contains the list of user categories to choose.
Here is the query to fetch them.
"""
user = getattr(request, 'user')
context = {}
if not isinstance(user, AnonymousUser):
qs = Notification.objects \
.filter(user=user) \
.order_by('-created_on')
notifications = list(qs[0:settings.DISPLAY_NOTIFICATION_COUNT])
if len(notifications) > 0:
has_new_notifications = (not notifications[0].seen)
else:
has_new_notifications = False
context.update({
'notifications': notifications,
'has_new_notifications': has_new_notifications,
})
return context
|
[
"[email protected]"
] | |
e270360c2e7314eb2a69a82872043984e52ce1b4
|
70ba2c6f45bf036cf8e2860003ee03ef2de7842c
|
/apps/registro_hora_extra/models.py
|
c2e70f1ef58c47a87e4baec3d3f2f58225e2e7a5
|
[] |
no_license
|
Izaiasjun1Dev/gestao_rh
|
b99d0ba767ad136ba596c8da388ec184e19b5aae
|
29830e5d7e1eed5eec93548ee31b19a4c6d62797
|
refs/heads/master
| 2022-01-26T00:57:10.561760 | 2019-07-31T17:56:25 | 2019-07-31T17:56:25 | 199,683,872 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
from django.db import models
class Registro_Hora_Extra(models.Model):
motivo = models.CharField(max_length=100)
def __str__(self):
return self.motivo
|
[
"[email protected]"
] | |
db5fc913c50c24d9c3bb985ff8799f82103afce3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/454/usersdata/302/106280/submittedfiles/programa.py
|
ca76871b6b4cba21f2e253a7a5ef79930a322905
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
# -*- coding: utf-8 -*-
c = int(input('Digite o número de consultas:'))
for i in range(c):
c.append(int(input('Digite a consulta %d: %(i+1))))
print(c)
|
[
"[email protected]"
] | |
20a5737230bac56977780a12595c131b8523268d
|
9fa8c280571c099c5264960ab2e93255d20b3186
|
/system/scientist/panel/control/stop_criterion/view.py
|
76a6fe614d545726fcac47b3131dbcdefb304689
|
[
"MIT"
] |
permissive
|
thuchula6792/AutoOED
|
8dc97191a758200dbd39cd850309b0250ac77cdb
|
272d88be7ab617a58d3f241d10f4f9fd17b91cbc
|
refs/heads/master
| 2023-07-23T16:06:13.820272 | 2021-09-08T14:22:18 | 2021-09-08T14:22:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,417 |
py
|
import tkinter as tk
from system.gui.widgets.factory import create_widget
class StopCriterionView:
def __init__(self, root_view):
self.root_view = root_view
self.window = create_widget('toplevel', master=self.root_view.root_view.root, title='Stopping Criterion')
self.widget = {
'var': {},
'entry': {},
}
frame_options = create_widget('frame', master=self.window, row=0, column=0, padx=0, pady=0)
self.name_options = {'time': 'Time', 'n_iter': 'Number of iterations', 'n_sample': 'Number of samples', 'hv': 'Hypervolume value', 'hv_conv': 'Hypervolume convergence'}
def check(var, entry):
if var.get() == 1:
entry.enable()
else:
entry.disable()
frame_time = create_widget('frame', master=frame_options, row=0, column=0)
self.widget['var']['time'] = tk.IntVar()
cb_time = tk.Checkbutton(master=frame_time, variable=self.widget['var']['time'], highlightthickness=0, bd=0)
cb_time.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_time, text=self.name_options['time'] + ': stop after').grid(row=0, column=1, sticky='W')
self.widget['entry']['time'] = create_widget('entry', master=frame_time, row=0, column=2, class_type='float',
required=True, valid_check=lambda x: x > 0, error_msg='time limit must be positive', pady=0)
tk.Label(master=frame_time, text='seconds').grid(row=0, column=3, sticky='W')
cb_time.configure(command=lambda: check(self.widget['var']['time'], self.widget['entry']['time']))
frame_n_iter = create_widget('frame', master=frame_options, row=1, column=0)
self.widget['var']['n_iter'] = tk.IntVar()
cb_n_iter = tk.Checkbutton(master=frame_n_iter, variable=self.widget['var']['n_iter'], highlightthickness=0, bd=0)
cb_n_iter.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_n_iter, text=self.name_options['n_iter'] + ': stop after').grid(row=0, column=1, sticky='W')
self.widget['entry']['n_iter'] = create_widget('entry', master=frame_n_iter, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of iterations must be positive', pady=0)
tk.Label(master=frame_n_iter, text='iterations').grid(row=0, column=3, sticky='W')
cb_n_iter.configure(command=lambda: check(self.widget['var']['n_iter'], self.widget['entry']['n_iter']))
frame_n_sample = create_widget('frame', master=frame_options, row=2, column=0)
self.widget['var']['n_sample'] = tk.IntVar()
cb_n_sample = tk.Checkbutton(master=frame_n_sample, variable=self.widget['var']['n_sample'], highlightthickness=0, bd=0)
cb_n_sample.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_n_sample, text=self.name_options['n_sample'] + ': stop when number of samples reaches').grid(row=0, column=1, sticky='W')
self.widget['entry']['n_sample'] = create_widget('entry', master=frame_n_sample, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of samples must be positive', pady=0)
cb_n_sample.configure(command=lambda: check(self.widget['var']['n_sample'], self.widget['entry']['n_sample']))
frame_hv = create_widget('frame', master=frame_options, row=3, column=0)
self.widget['var']['hv'] = tk.IntVar()
cb_hv = tk.Checkbutton(master=frame_hv, variable=self.widget['var']['hv'], highlightthickness=0, bd=0)
cb_hv.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_hv, text=self.name_options['hv'] + ': stop when hypervolume reaches').grid(row=0, column=1, sticky='W')
self.widget['entry']['hv'] = create_widget('entry', master=frame_hv, row=0, column=2, class_type='float',
required=True, valid_check=lambda x: x > 0, error_msg='hypervolume value must be positive', pady=0)
cb_hv.configure(command=lambda: check(self.widget['var']['hv'], self.widget['entry']['hv']))
frame_hv_conv = create_widget('frame', master=frame_options, row=4, column=0)
self.widget['var']['hv_conv'] = tk.IntVar()
cb_hv_conv = tk.Checkbutton(master=frame_hv_conv, variable=self.widget['var']['hv_conv'], highlightthickness=0, bd=0)
cb_hv_conv.grid(row=0, column=0, sticky='W')
tk.Label(master=frame_hv_conv, text=self.name_options['hv_conv'] + ': stop when hypervolume stops to improve over past').grid(row=0, column=1, sticky='W')
self.widget['entry']['hv_conv'] = create_widget('entry', master=frame_hv_conv, row=0, column=2, class_type='int',
required=True, valid_check=lambda x: x > 0, error_msg='number of iterations must be positive', pady=0)
tk.Label(master=frame_hv_conv, text='iterations').grid(row=0, column=3, sticky='W')
cb_hv_conv.configure(command=lambda: check(self.widget['var']['hv_conv'], self.widget['entry']['hv_conv']))
for key in self.name_options:
self.widget['entry'][key].disable()
frame_action = create_widget('frame', master=self.window, row=1, column=0, pady=0, sticky=None)
self.widget['save'] = create_widget('button', master=frame_action, row=0, column=0, text='Save')
self.widget['cancel'] = create_widget('button', master=frame_action, row=0, column=1, text='Cancel')
|
[
"[email protected]"
] | |
0e1c84c3ad5515132006c028d0ce7d87bdfbc4e2
|
c8c77f6cc6c032daf179ea2138e4dda5473b426b
|
/pinpoint-email/pinpoint_send_email_message_email_api.py
|
c607d8762534d68c7b98210c7dd0bc37ba9ccd58
|
[] |
no_license
|
arunmastermind/AWS-examples-using-BOTO3
|
b411a6c96011ab58a66952a53fa2938cb58d5135
|
e8390094374c10902bab016a21caba75ea179b5a
|
refs/heads/master
| 2020-09-30T13:34:33.657621 | 2019-12-11T12:37:44 | 2019-12-11T12:37:44 | 227,297,211 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,698 |
py
|
import boto3
from botocore.exceptions import ClientError

# The AWS Region that you want to use to send the email.
AWS_REGION = "us-west-2"

# The "From" address. This address has to be verified in
# Amazon Pinpoint in the region you're using to send email.
SENDER = "Mary Major <[email protected]>"

# The addresses on the "To" line. If your Amazon Pinpoint account is in
# the sandbox, these addresses also have to be verified.
TOADDRESSES = ["[email protected]"]

# CC and BCC addresses. If your account is in the sandbox, these
# addresses have to be verified.
CCADDRESSES = ["[email protected]", "[email protected]"]
BCCADDRESSES = ["[email protected]"]

# The configuration set that you want to use to send the email.
CONFIGURATION_SET = "ConfigSet"

# The subject line of the email.
SUBJECT = "Amazon Pinpoint Test (SDK for Python)"

# The body of the email for recipients whose email clients don't support HTML
# content. (Bug fix: the URL was mangled as "https:#..." and rendered a broken
# link in the delivered message.)
BODY_TEXT = """Amazon Pinpoint Test (SDK for Python)
-------------------------------------
This email was sent with Amazon Pinpoint using the AWS SDK for Python.
For more information, see https://aws.amazon.com/sdk-for-python/
"""

# The body of the email for recipients whose email clients can display HTML
# content. (Same URL fix applied to both hrefs.)
BODY_HTML = """<html>
<head></head>
<body>
<h1>Amazon Pinpoint Test (SDK for Python)</h1>
<p>This email was sent with
<a href='https://aws.amazon.com/pinpoint/'>Amazon Pinpoint</a> using the
<a href='https://aws.amazon.com/sdk-for-python/'>
AWS SDK for Python</a>.</p>
</body>
</html>
"""

# The message tags that you want to apply to the email.
TAG0 = {'Name': 'key0', 'Value': 'value0'}
TAG1 = {'Name': 'key1', 'Value': 'value1'}

# The character encoding that you want to use for the subject line and message
# body of the email.
CHARSET = "UTF-8"

# Create a new Pinpoint resource and specify a region.
client = boto3.client('pinpoint-email', region_name=AWS_REGION)

# Send the email.
try:
    # Create a request to send the email. The request contains all of the
    # message attributes and content that were defined earlier.
    response = client.send_email(
        FromEmailAddress=SENDER,
        # An object that contains all of the email addresses that you want to
        # send the message to. You can send a message to up to 50 recipients in
        # a single call to the API.
        Destination={
            'ToAddresses': TOADDRESSES,
            'CcAddresses': CCADDRESSES,
            'BccAddresses': BCCADDRESSES
        },
        # The body of the email message.
        Content={
            # Create a new Simple message. If you need to include attachments,
            # you should send a RawMessage instead.
            'Simple': {
                'Subject': {
                    'Charset': CHARSET,
                    'Data': SUBJECT,
                },
                'Body': {
                    'Html': {
                        'Charset': CHARSET,
                        'Data': BODY_HTML
                    },
                    'Text': {
                        'Charset': CHARSET,
                        'Data': BODY_TEXT,
                    }
                }
            }
        },
        # The configuration set that you want to use when you send this message.
        ConfigurationSetName=CONFIGURATION_SET,
        EmailTags=[
            TAG0,
            TAG1
        ]
    )
# Display an error if something goes wrong.
except ClientError as e:
    print("The message wasn't sent. Error message: \"" + e.response['Error']['Message'] + "\"")
else:
    print("Email sent!")
    print("Message ID: " + response['MessageId'])
|
[
"[email protected]"
] | |
89aadd7f9dd9e91da3e1da7db4d4e2395ffb8883
|
93b495b3624399c81b7edb39d1f6c5cebb2cd987
|
/vyper/ast.py
|
445bdf16b1ecb9ebfb855d44e98e25836353f5e9
|
[
"Apache-2.0"
] |
permissive
|
fubuloubu/vyper-redux
|
bf4b91d00290e5ed063ce74b44b740af6c3afae7
|
a190c69083a968136ce10d1ceb68e42e41ff9de1
|
refs/heads/master
| 2020-12-20T16:29:44.390444 | 2020-01-25T07:53:23 | 2020-01-25T07:53:23 | 236,137,024 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,683 |
py
|
import ast as py_ast
import inspect
import sys
from typing import (
Any,
Dict,
List,
Tuple,
Type,
Union,
)
import lark
import stringcase
class Ast(py_ast.AST):
_fields = ()
class Module(Ast):
_fields = ('methods',)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
self.methods, children = split_ast(children, Method)
self.variables, children = split_ast(children, Variable)
assert len(children) == 0, f"Did not save everything: {children}"
class Method(Ast):
_fields = (
'decorators',
'name',
'parameters',
'body',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
decorators_node, children = split_tree(children, "decorators")
assert len(decorators_node) <= 1, "Should not have more than 1 set of decorators"
self.decorators = decorators_node[0].children
method_type, children = split_tree(children, "method_type")
assert len(method_type) == 1, "Should not have more than 1 method_type"
method_type = convert_to_dict(method_type[0].children)
self.name = method_type['NAME']
self.parameters = method_type.get('parameters', None)
body, children = split_tree(children, "body")
assert len(body) == 1, "Should not have more than 1 body"
self.body = body[0].children
assert len(children) == 0, f"Did not save everything: {children}"
class Decorator(Ast):
_fields = (
'type',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
assert len(children) == 1
assert children[0].type == 'DECORATOR_NAME'
self.type = children[0].value
class Statement(Ast):
pass
class PassStmt(Statement):
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
pass # NOTE: Check later for only statement in body
class ExprStmt(Statement):
_fields = (
'assignment',
'expression',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
assert len(children) == 2
self.assignment = children[0]
self.expression = children[1]
class Var(Ast):
_fields = (
'name',
'type',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
properties = convert_to_dict(children)
self.name = properties['NAME']
self.type = properties.get('TYPE', None) # NOTE: Do not know type yet if none
class Variable(Ast):
_fields = (
'name',
'type',
'public',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
properties = convert_to_dict(children)
if 'with_getter' in properties.keys():
self.public = True
properties = properties['with_getter']
else:
self.public = False
self.name = properties['NAME']
self.type = get_type(properties)
class Parameter(Variable):
pass
class Attribute(Var):
_fields = (
'var',
'property',
)
def __init__(self, children: List[Union[lark.Tree, lark.Token]]):
assert len(children) == 2
self.var = children[0]
properties = convert_to_dict(children[1])
self.property = properties['NAME']
def split_ast(
    nodes: List[Ast],
    ast_type: Type[Ast],
) -> Tuple[List[Ast], List[Ast]]:
    """Partition *nodes* by type in a single pass.

    Returns ``(matching, rest)``: nodes that are instances of *ast_type*
    followed by everything else, with relative order preserved in both lists.
    """
    matching = []
    rest = []
    for node in nodes:
        bucket = matching if isinstance(node, ast_type) else rest
        bucket.append(node)
    return matching, rest
def split_tree(
    nodes: List[lark.Tree],
    rule_type: str,
) -> Tuple[List[lark.Tree], List[lark.Tree]]:
    """Partition lark trees by whether their rule name equals *rule_type*.

    Returns ``(matching, rest)`` with the original relative order kept.
    """
    matching = []
    rest = []
    for tree in nodes:
        if tree.data == rule_type:
            matching.append(tree)
        else:
            rest.append(tree)
    return matching, rest
def convert_to_dict(
    node: Union[List[Union[lark.Tree, lark.Token, Ast]], Union[lark.Tree, lark.Token, Ast]],
) -> Dict:
    """Recursively convert a lark parse node (or list of nodes) to plain dicts.

    Tokens become ``{token_type: value}``; trees become
    ``{rule_name: converted children}``; Ast instances pass through untouched.
    A list is merged into one dict only when every converted entry is a dict
    and no keys clash; otherwise the list of converted entries is returned
    as-is.
    """
    if isinstance(node, lark.Token):
        return {node.type: node.value}
    elif isinstance(node, lark.Tree):
        return {node.data: convert_to_dict(node.children)}
    elif isinstance(node, list):
        # First convert every child, then attempt to flatten the results.
        obj = list()
        for n in node:
            attr = convert_to_dict(n)
            obj.append(attr)
        minified_obj = dict()
        for item in obj:
            if isinstance(item, dict) and all([k not in minified_obj.keys() for k in item.keys()]):
                minified_obj.update(item)
            else:
                return obj  # Non-dict entry or duplicate key: give up and return the raw list
        return minified_obj
    elif isinstance(node, Ast):
        return node
    else:
        raise ValueError(f"Cannot convert {node}.")
def get_type(properties: Dict[str, Any]) -> str:
    """Resolve the basic type name from a (possibly nested) property dict.

    Unwraps the location wrappers in precedence order (storage, abi_type,
    memory) until a ``BASIC_TYPE`` entry is found; raises ``ValueError``
    when none of the known keys is present.
    """
    for wrapper in ('storage', 'abi_type', 'memory'):
        if wrapper in properties:
            return get_type(properties[wrapper])
    if 'BASIC_TYPE' in properties:
        return properties['BASIC_TYPE']
    raise ValueError(f"Could not process {properties}.")
def _get_ast_classes():
    """Collect every Ast subclass defined in this module, keyed by class name.

    Note: ``issubclass(Ast, Ast)`` is True, so the ``Ast`` base class itself
    is included in the returned mapping.
    """
    ast_classes = dict()
    # Scan this module's attributes for classes deriving from Ast.
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj) and issubclass(obj, Ast):
            ast_classes[name] = obj
    return ast_classes
AST_CLASSES = _get_ast_classes()
class AstConverter(lark.Transformer):
def __init__(self, *args, **kwargs):
for name, ast_class in _get_ast_classes().items():
# NOTE: Convention is for classnames to be CamalCase,
# but Lark rules are snake_case
setattr(self, stringcase.snakecase(name), ast_class)
super().__init__(*args, **kwargs)
class _CheckLarkConversionFailures(py_ast.NodeVisitor):
def visit(self, node):
node_class = node.__class__.__name__
for member_name in node._fields:
member = getattr(node, member_name)
if isinstance(member, (lark.Tree, lark.Token)):
raise ValueError(
f"Could not convert {member_name} in {node_class}: {member}"
)
if isinstance(member, list):
for item in member:
if isinstance(item, (lark.Tree, lark.Token)):
raise ValueError(
f"Could not convert {member_name} in {node_class}: {item}"
)
super().visit(node)
def ast_parse(parse_tree: lark.Tree) -> Ast:
ast = AstConverter().transform(parse_tree)
_CheckLarkConversionFailures().visit(ast)
return ast
|
[
"[email protected]"
] | |
adcc7a4f456face62e0edc4a15503cb7ef48c86e
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/XjShRightSide/YW_GGQQ_QLFXJHA_086.py
|
6a72b553056082e63709d5549fef6af05775698c
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,894 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/service")
from OptMainService import *
from OptQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/mysql")
from Opt_SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryOrderErrorMsg import queryOrderErrorMsg
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_GGQQ_QLFXJHA_086(xtp_test_case):
def setUp(self):
sql_transfer = Opt_SqlData_Transfer()
sql_transfer.transfer_fund_asset('YW_GGQQ_QLFXJHA_086')
clear_data_and_restart_sh()
Api.trade.Logout()
Api.trade.Login()
def test_YW_GGQQ_QLFXJHA_086(self):
title = '卖平(权利方平仓):限价-验资(可用资金刚好)(下单金额<费用&&可用资金=(费用-下单金额))'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('10001032', '1', '*', '1', '0', '*', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
logger.error('查询结果为False,错误原因: {0}'.format(
json.dumps(rs['测试错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_OPTION'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_CLOSE'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 2
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
if rs['用例测试结果']:
logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
else:
logger.warning('执行结果为{0},{1},{2}'.format(
str(rs['用例测试结果']), str(rs['用例错误源']),
json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True) # 4
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
2d5241ff37c81e87fe5dde76480448e82b1a8bf5
|
2bacd64bd2679bbcc19379947a7285e7ecba35c6
|
/1-notebook-examples/keras-udemy-course/ann_class2/mxnet_example.py
|
9ea745fce7cb6e4c583659c52a0dfbfe86e6fcb1
|
[
"MIT"
] |
permissive
|
vicb1/deep-learning
|
cc6b6d50ae5083c89f22512663d06b777ff8d881
|
23d6ef672ef0b3d13cea6a99984bbc299d620a73
|
refs/heads/master
| 2022-12-12T15:56:55.565836 | 2020-03-06T01:55:55 | 2020-03-06T01:55:55 | 230,293,726 | 0 | 0 |
MIT
| 2022-12-08T05:27:43 | 2019-12-26T16:23:18 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,621 |
py
|
# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow
# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# installation is just one line:
# https://mxnet.incubator.apache.org/get_started/install.html
#
# Mac:
# pip install mxnet
#
# Linux (GPU):
# pip install mxnet-cu80
#
# Windows (a little more involved):
# https://mxnet.incubator.apache.org/get_started/windows_setup.html
import mxnet as mx
import numpy as np
import matplotlib.pyplot as plt
from util import get_normalized_data, y2indicator
# get the data, same as Theano + Tensorflow examples
# no need to split now, the fit() function will do it
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
# get shapes
N, D = Xtrain.shape
K = len(set(Ytrain))
# training config
batch_size = 32
epochs = 15
# convert the data into a format appropriate for input into mxnet
train_iterator = mx.io.NDArrayIter(
Xtrain,
Ytrain,
batch_size,
shuffle=True
)
test_iterator = mx.io.NDArrayIter(Xtest, Ytest, batch_size)
# define a placeholder to represent the inputs
data = mx.sym.var('data')
# define the model architecture
a1 = mx.sym.FullyConnected(data=data, num_hidden=500)
z1 = mx.sym.Activation(data=a1, act_type="relu")
a2 = mx.sym.FullyConnected(data=z1, num_hidden = 300)
z2 = mx.sym.Activation(data=a2, act_type="relu")
a3 = mx.sym.FullyConnected(data=z2, num_hidden=K)
y = mx.sym.SoftmaxOutput(data=a3, name='softmax')
# train it
# required in order for progress to be printed
import logging
logging.getLogger().setLevel(logging.DEBUG)
# use mx.gpu() if you have gpu
model = mx.mod.Module(symbol=y, context=mx.cpu())
model.fit(
train_iterator, # train data
eval_data=test_iterator, # validation data
optimizer=mx.optimizer.Adam(),
eval_metric='acc', # report accuracy during training
batch_end_callback = mx.callback.Speedometer(batch_size, 100), # output progress for each 100 data batches
num_epoch=epochs,
)
# no return value
# list of optimizers: https://mxnet.incubator.apache.org/api/python/optimization.html
# test it
# predict accuracy of mlp
acc = mx.metric.Accuracy()
model.score(test_iterator, acc)
print(acc)
print(acc.get())
# currently, there is no good way to plot the training loss / accuracy history
# https://github.com/apache/incubator-mxnet/issues/2873
#
# some have suggested parsing the logs
# https://github.com/apache/incubator-mxnet/blob/master/example/kaggle-ndsb1/training_curves.py
|
[
"[email protected]"
] | |
8f0559a6949b29f1325ea7e4b0952a514e72b342
|
2a17e6a5d78849469b2094ec11f8a51e86475128
|
/DIU_HS/settings.py
|
36d64d8a3fb733268c23cd3ad16ffc365d0de70c
|
[] |
no_license
|
maxhasan882/DIU_HS
|
fbe25b5d22dded5171b7bd9c31a75c16f03a7f8a
|
cbffe3b3799e46afe492064ecb45b617e8ff536b
|
refs/heads/master
| 2020-07-29T07:54:07.332060 | 2019-09-20T09:12:00 | 2019-09-20T09:12:00 | 209,721,763 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,506 |
py
|
import os

# Django settings for the DIU_HS project.

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY NOTE(review): the secret key is committed to source control here;
# for any real deployment it should be loaded from the environment instead.
SECRET_KEY = '-qqcz^f-332ox2t)s(b$d&slmg^c+q@m!--w*7_%w_pckp(gdq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Development-only tooling; should be disabled in production builds.
    'debug_toolbar',
    'rest_framework',
    'dblayer',
]
MIDDLEWARE = [
    # debug_toolbar middleware must come as early as possible.
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DIU_HS.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DIU_HS.wsgi.application'
# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables / a secrets store before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'diuhsDB',
        'USER': 'postgres',
        'PASSWORD': 'mithu1996',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization / time zone configuration.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = [
    '127.0.0.1',
]
|
[
"[email protected]"
] | |
d6a2692395d973722c538c781b8fecfa4e62647b
|
c53fcab99e84ccfe6d9f1455e7471892fbd6661e
|
/kubeface/commands/copy.py
|
63edb408d96fc9594aa6ee83d89f860d51b393cf
|
[
"Apache-2.0"
] |
permissive
|
proj4spes/kubeface
|
3af558ae05f1fd89b2d93e81ce479094ef3f4b8f
|
443d7432e6d2f8e4d20b6326e98fabeec7ad68b6
|
refs/heads/master
| 2021-04-03T02:53:10.284569 | 2017-06-22T19:40:30 | 2017-06-22T19:40:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,263 |
py
|
'''
Copy files, including support for google storage buckets.
'''
import sys
import argparse
import logging
from .. import storage
from ..common import configure_logging
from .. import serialization
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("source")
parser.add_argument("destination")
parser.add_argument(
"--no-error",
action="store_true",
default=False,
help="")
parser.add_argument(
"--quiet",
action="store_true",
default=False,
help="")
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="")
parser.add_argument(
"--print-deserialized",
action="store_true",
default=False,
help="")
def run(argv=sys.argv[1:]):
    """Entry point for the copy command.

    Reads ``args.source`` through the storage abstraction and writes it to
    ``args.destination``, or to stdout when the destination is ``-``.

    NOTE(review): the default ``argv`` is evaluated once at import time;
    pass ``argv`` explicitly when calling this programmatically.
    """
    args = parser.parse_args(argv)
    configure_logging(args)
    logging.info("Reading: %s" % args.source)
    input_handle = storage.get(args.source)
    if args.print_deserialized:
        # Print the deserialized payload, then rewind so the raw bytes can
        # still be copied below.
        deserialized = serialization.load(input_handle)
        input_handle.seek(0)
        print(deserialized)
    if args.destination == "-":
        print(input_handle.read())
    else:
        logging.info("Writing: %s" % args.destination)
        storage.put(args.destination, input_handle)
    logging.info("Completed.")
|
[
"[email protected]"
] | |
4209e5499b98a104adc9693ae8356a5bc01c7ae4
|
30cf02eb3c15da89db2e6efd3d405e92d0c8df36
|
/src/pyobo/sources/gwascentral_study.py
|
0f00a9432c87ccdffe5a7ed6c2fc786e107b0af4
|
[
"MIT"
] |
permissive
|
shunsunsun/pyobo
|
f53e5e6a4bb0b3ea135312cd8a54c905a52bd754
|
407c8f15873eb84cb5351ccc6e6ae0e8e3add22a
|
refs/heads/master
| 2023-04-04T01:13:16.456853 | 2021-04-05T15:57:33 | 2021-04-05T15:57:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,162 |
py
|
# -*- coding: utf-8 -*-
"""Converter for GWAS Central."""
import logging
import tarfile
from typing import Iterable
from xml.etree import ElementTree
from pyobo.struct import Obo, Reference, Term, has_part
from pyobo.utils.path import ensure_path
logger = logging.getLogger(__name__)
VERSION = 'jan2021'
URL = f'http://www.gwascentral.org/docs/GC_{VERSION}.tar.gz'
PREFIX = 'gwascentral.study'
def get_obo():
"""Get GWAS Central Studies as OBO."""
return Obo(
ontology=PREFIX,
name='GWAS Central Study',
iter_terms=iterate_terms,
iter_terms_kwargs=dict(version=VERSION),
data_version=VERSION,
typedefs=[has_part],
auto_generated_by=f'bio2obo:{PREFIX}',
)
def _get_term_from_tree(tree: ElementTree.ElementTree) -> Term:
    """Build a PyOBO ``Term`` from one parsed GWAS Central study XML tree.

    The study's ``<identifier>`` and ``<name>`` become the term reference,
    the optional ``<description>`` becomes the definition, and each entry
    found under ``experiments`` is linked via a ``has_part`` relationship
    into the ``gwascentral.experiment`` namespace.
    """
    name = tree.find('name').text
    description = tree.find('description').text
    if description:
        # Collapse the multi-line XML text into a single-line definition.
        description = description.strip().replace('\n', ' ')
    identifier = tree.find('identifier').text
    term = Term(
        reference=Reference(PREFIX, identifier, name),
        definition=description,
    )
    # NOTE(review): this iterates findall('experiments') (plural tag) and reads
    # name/identifier directly off each hit, which assumes one container
    # element per experiment — confirm against the GWAS Central XML schema.
    for experiment in tree.findall('experiments'):
        experiment_name = experiment.find('name').text
        experiment_id = experiment.find('identifier').text
        term.append_relationship(has_part, Reference(
            'gwascentral.experiment',
            identifier=experiment_id,
            name=experiment_name,
        ))
    return term
def iterate_terms(version: str) -> Iterable[Term]:
"""Iterate over GWAS Central Study terms."""
path = ensure_path(PREFIX, url=URL, version=version)
with tarfile.open(path) as tar_file:
for tar_info in tar_file:
if not tar_info.path.endswith('.xml'):
continue
with tar_file.extractfile(tar_info) as file:
try:
tree = ElementTree.parse(file)
except ElementTree.ParseError:
logger.warning('malformed XML in %s', tar_info.path)
continue
yield _get_term_from_tree(tree)
if __name__ == '__main__':
get_obo().write_default()
|
[
"[email protected]"
] | |
4a4aedbcae688967b5b85e60e73a727908c934a5
|
4fc016459e4c78680c61488c771eb6b7eb20d5fe
|
/Python-Algorithms-DataStructure/src/leet/104_MaximumDepthofBinaryTree.py
|
f6cf6d7dcb193f73f277665a5f23cbafd59f85b0
|
[] |
no_license
|
coremedy/Python-Algorithms-DataStructure
|
7c318de68fd9694377a0a4369d8dbeb49e1e17aa
|
3873502679a5def6af4be03028542f07d059d1a9
|
refs/heads/master
| 2021-01-25T07:34:17.714241 | 2015-11-05T10:17:40 | 2015-11-05T10:17:40 | 27,949,498 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 600 |
py
|
'''
Created on 2015-08-02
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 104: maximum depth of a binary tree."""

    # @param {TreeNode} root
    # @return {integer}
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path."""
        return self.DFS(0, root)

    def DFS(self, depth, node):
        """Recurse down the tree, carrying the depth accumulated so far."""
        if node is None:
            return depth
        left = self.DFS(depth + 1, node.left)
        right = self.DFS(depth + 1, node.right)
        return left if left > right else right
if __name__ == '__main__':
pass
|
[
"[email protected]"
] | |
7526e1a07f83c8b237e6f892e95f0b2f235bb4b0
|
8fb5319079f3d9a5524a4fa44dc9fdeb4e578a33
|
/Contours/counting_coins.py
|
3d0b8461f126be4dabeaf660096bdf9d2180144c
|
[] |
no_license
|
KhairulIzwan/Python-OpenCV-Basics
|
1dc414a07d25b2800f3a6f4eb7edf375e891b92b
|
2bcf3536c9d5225188dce7c081600459a7b1ebb0
|
refs/heads/main
| 2023-04-26T17:37:10.838035 | 2021-05-23T03:11:36 | 2021-05-23T03:11:36 | 369,949,180 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,261 |
py
|
#!/usr/bin/env python
import numpy as np
import argparse
import cv2
# use argparse to handle parsing our command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)
cv2.imshow("Image", image)
edged = cv2.Canny(blurred, 30, 150)
cv2.imshow("Edges", edged)
_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print "I count %d coins in this image" % (len(cnts))
coins = image.copy()
cv2.drawContours(coins, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Coins", coins)
cv2.waitKey(0)
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
# print "Coin #%d" % (i + 1)
coinBar = "Coin #%d" % (i + 1)
coin = image[y:y + h, x:x + w]
cv2.imshow(coinBar, coin)
mask = np.zeros(image.shape[:2], dtype = "uint8")
((centerX, centerY), radius) = cv2.minEnclosingCircle(c)
cv2.circle(mask, (int(centerX), int(centerY)), int(radius), 255, -1)
mask = mask[y:y + h, x:x + w]
cv2.imshow("Masked Coin", cv2.bitwise_and(coin, coin, mask=mask))
cv2.waitKey(0)
cv2.destroyWindow(coinBar)
|
[
"[email protected]"
] | |
d04a3cc08125307c425fc4a3bbdbde890ec4fcda
|
b5aa43c8db450c3bcacc8f28897eab684a8032a1
|
/data/games/missiles/states/level_fail.py
|
1968f989a08e959a762da5f55ffd79a9dd9aa27b
|
[] |
no_license
|
iminurnamez/Python_Arcade_Collab
|
29a74cf2a6264969de9bae3c4a6ed23d6282e793
|
67702414ed30addd1bf46339bb458df34ed88f2a
|
refs/heads/master
| 2021-04-15T07:32:18.573004 | 2018-05-13T14:29:19 | 2018-05-13T14:29:19 | 126,644,972 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,748 |
py
|
import pygame as pg
from data.components.state_machine import _State
from data.core.tools import scaled_mouse_pos
from data.core import constants as prog_constants
from data.components.labels import Label, ButtonGroup
from data.components.special_buttons import NeonButton
from .. import constants
class LevelFail(_State):
    """State shown when the player fails a level (all cities destroyed)."""

    def __init__(self, controller):
        super(LevelFail, self).__init__(controller)

    def startup(self, persistent):
        """Prepare labels and the OK button for the current failed level."""
        self.persist = persistent
        screen = constants.SCREEN_RECT
        text_color = constants.LOW_LIGHT_GREEN
        font = prog_constants.FONTS["Fixedsys500c"]
        level_num = self.persist["player"].level_num
        # Headline plus a two-line taunt centered on the screen.
        self.labels = [
            Label(font, 48, "Level {} Failed".format(level_num), text_color,
                  {"midtop": (screen.centerx, 5)}),
            Label(font, 32, "All your cities are", text_color,
                  {"midbottom": (screen.centerx, 200)}),
            Label(font, 32, "belong to dust", text_color,
                  {"midtop": (screen.centerx, 200)}),
        ]
        self.buttons = ButtonGroup()
        NeonButton((373, 630), "OK", 32, self.to_high_scores,
                   None, self.buttons)

    def to_high_scores(self, *args):
        """Discard the failed run's save and transition to the high scores."""
        self.persist["player"].clear_save()
        self.next = "HIGH_SCORES"
        self.done = True

    def get_event(self, event, scale):
        self.buttons.get_event(event)

    def update(self, surface, keys, current_time, dt, scale):
        self.buttons.update(scaled_mouse_pos(scale))
        self.draw(surface)

    def draw(self, surface):
        surface.fill(constants.BACKGROUND_BASE)
        for label in self.labels:
            label.draw(surface)
        self.buttons.draw(surface)
|
[
"[email protected]"
] | |
b29a1e598f2e2fc26af73f214978b0998c04b461
|
8c917dc4810e2dddf7d3902146280a67412c65ea
|
/v_7/NISS/common_shamil_v3/hr_violation_punishment/__openerp__.py
|
719dad2e987819e3bcda88edba479c93f4c3fcff
|
[] |
no_license
|
musabahmed/baba
|
d0906e03c1bbd222d3950f521533f3874434b993
|
0b997095c260d58b026440967fea3a202bef7efb
|
refs/heads/master
| 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2013-2014 NCTR (<http://www.nctr.sd>).
#
##############################################################################
{
'name': 'Employee Violation and Punishment',
'version': '1.1',
'author': 'NCTR',
'category': 'Human Resources',
'website': 'http://www.nctr.sd',
'summary': 'Employee Violation and Punishment',
'description': """
Employee Violation and Punishment
==========================
""",
'images' : ['images/violations.png'],
'depends': ['hr_payroll_custom'],
'data': [
'security/ir.model.access.csv',
'hr_violation_punishment_view.xml',
'report/hr_report.xml',
'hr_violations_punishment_workflow.xml',
'wizard/emp_violations_punishments.xml',
],
'demo': [],
'test': [],
'installable': True,
'application': True,
'auto_install': False,
'css': [ ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"[email protected]"
] | |
26726fa6fa874f79a109c4fc897e9f4671bd5ca7
|
439386f9097632d44d31d1f599df76ec2820d072
|
/性能项目/统一大厅常规checklist/1601/DFQP/src/uilib/exchange_page.py
|
100b9e01ac952280dbe97665969e45d263c46165
|
[] |
no_license
|
YiFeng0755/testcase
|
33693f0940a6497aa40e2e51a0535c9eb6c12b29
|
edc19480c3e94cbcbf004aa9d20099ec6d1b9304
|
refs/heads/master
| 2020-04-28T04:34:28.232022 | 2019-03-11T11:13:25 | 2019-03-11T11:13:25 | 146,287,761 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#Author: MindyZhang
'''
兑换奖品场景
'''
from appiumcenter.element import Element
class Exchange_Page(Element):
    """Page object for the prize-exchange screen.

    Currently a placeholder: all behaviour is inherited from ``Element``;
    no screen-specific locators or actions have been added yet.
    """
    pass
|
[
"[email protected]"
] | |
a5a75d90679c6ca3fd506ea8dfbafd949dc61360
|
d488f052805a87b5c4b124ca93494bc9b78620f7
|
/google-cloud-sdk/lib/googlecloudsdk/core/updater/release_notes.py
|
977fe1c29e08b001c9d41029efce76a4f5bf998e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PacktPublishing/DevOps-Fundamentals
|
5ce1fc938db66b420691aa8106ecfb3f9ceb1ace
|
60597e831e08325c7e51e8557591917f7c417275
|
refs/heads/master
| 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,835 |
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities for comparing RELEASE_NOTES between Cloud SDK versions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.document_renderers import render_document
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.util import encoding
from six.moves import StringIO
class ReleaseNotes(object):
    """A parsed RELEASE_NOTES file.

    The file should have the general structure of:

    # Google Cloud SDK - Release Notes

    Copyright 2014-2015 Google Inc. All rights reserved.

    ## 0.9.78 (2015/09/16)

    *   Note

    *   Note 2

    ## 0.9.77 (2015/09/09)

    *   Note 3
    """

    # Matches one version section in the release notes file. Lookaheads and
    # lookbehinds ensure the surrounding blank lines without consuming them
    # (they are also needed to match the next section). In words: a line
    # starting with '##' preceded by a blank line, carrying a version string;
    # then every following line is consumed until a newline that is followed
    # by a blank line and another '##' heading.
    _VERSION_SPLIT_REGEX = (
        r'(?<=\n)\n## +(?P<version>\S+).*\n(?:\n.*(?!\n\n## ))+.')

    # Maximum number of sections printed inline during an update.
    MAX_DIFF = 15

    @classmethod
    def FromURL(cls, url, command_path=None):
        """Downloads and parses release notes from the given URL.

        Any error in downloading or parsing the release notes is logged and
        swallowed, and None is returned.

        Args:
            url: str, The URL to download and parse.
            command_path: str, The command that is calling this, used to
                instrument the user agent for the download.

        Returns:
            ReleaseNotes, the parsed release notes or None on any failure.
        """
        try:
            response = installers.ComponentInstaller.MakeRequest(url, command_path)
            if not response:
                return None
            status = response.getcode()
            if status and status != 200:
                return None
            raw = encoding.Decode(response.read())
            return cls(raw)
        # pylint: disable=broad-except, We don't want any failure to download
        # or parse the release notes to block an update. Returning None here
        # makes the caller print a generic pointer to the online notes.
        except Exception:
            log.debug('Failed to download [{url}]'.format(url=url), exc_info=True)
            return None

    def __init__(self, text):
        """Parses the release notes from the given text.

        Args:
            text: str, The text of the release notes to parse.
        """
        self._text = text.replace('\r\n', '\n')
        # [(version string, full version text including header), ...]
        self._versions = [
            (match.group('version'), match.group().strip())
            for match in re.finditer(ReleaseNotes._VERSION_SPLIT_REGEX, self._text)
        ]

    def GetVersionText(self, version):
        """Gets the release notes text for the given version.

        Args:
            version: str, The version to get the release notes for.

        Returns:
            str, The release notes or None if the version does not exist.
        """
        index = self._GetVersionIndex(version)
        return None if index is None else self._versions[index][1]

    def _GetVersionIndex(self, version):
        """Gets the index of the given version in the list of parsed versions.

        Args:
            version: str, The version to look up.

        Returns:
            int, The index of the given version or None if not found.
        """
        return next(
            (idx for idx, (ver, _) in enumerate(self._versions) if ver == version),
            None)

    def Diff(self, start_version, end_version):
        """Creates a diff of the release notes between the two versions.

        The release notes are returned in reversed order (most recent first).

        Args:
            start_version: str, The later of the two versions; the diff starts
                with this version and walks backwards in time until
                end_version is hit. If None, starts at the most recent entry.
            end_version: str, The version you are currently on; accumulation
                stops when it is hit and this version is not included. If
                None, the diff runs through the end of all release notes.

        Returns:
            [(version, text)], the sections in the diff from most recent to
            least recent, or None if either version is not present.
        """
        if start_version:
            start = self._GetVersionIndex(start_version)
            if start is None:
                return None
        else:
            start = 0
        if end_version:
            end = self._GetVersionIndex(end_version)
            if end is None:
                return None
        else:
            end = len(self._versions)
        return self._versions[start:end]
def PrintReleaseNotesDiff(release_notes_url, current_version, latest_version):
    """Prints the release notes diff based on your current version.

    If any of the arguments are None, a generic message is printed telling the
    user to go to the web to view the release notes. If the release_notes_url
    is also None, the developers site page for the SDK is printed.

    Args:
        release_notes_url: str, The URL to download the latest release notes from.
        current_version: str, The current version of the SDK you have installed.
        latest_version: str, The version you are about to update to.
    """
    diff = None
    if release_notes_url and current_version and latest_version:
        notes = ReleaseNotes.FromURL(release_notes_url)
        if notes:
            diff = notes.Diff(latest_version, current_version)

    if not diff:
        # We failed to compute the diff; send people to a nice web page with
        # the release notes.
        log.status.write(
            'For the latest full release notes, please visit:\n {0}\n\n'.format(
                config.INSTALLATION_CONFIG.release_notes_url))
        return

    if len(diff) > ReleaseNotes.MAX_DIFF:
        log.status.Print("""\
A lot has changed since your last upgrade. For the latest full release notes,
please visit:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))
        return

    log.status.Print("""\
The following release notes are new in this upgrade.
Please read carefully for information about new features, breaking changes,
and bugs fixed. The latest full release notes can be viewed at:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))

    # Concatenate the sections and render them as a markdown document.
    buf = StringIO()
    for _, text in diff:
        buf.write(text)
        buf.write('\n')
    buf.seek(0)
    render_document.RenderDocument('text', buf, log.status)
    log.status.Print()
|
[
"[email protected]"
] | |
5ae862e9d518c2f20efcded062ee983747e72c04
|
4778bb52672e5bfd3bc227fd46bd3e2262146788
|
/check_pickle_data.py
|
77936b0793afdc053427dfe6f921049916de9d4f
|
[] |
no_license
|
vyshor/NTU_timetable_generator
|
cf5d2914a52d41ca1087259fafe215d3298cfd3d
|
e7223fd98da718232af85e960bddc9e88ee02e5d
|
refs/heads/master
| 2021-06-02T09:12:44.419674 | 2021-05-20T14:25:04 | 2021-05-20T14:25:04 | 135,579,641 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 245 |
py
|
import pandas as pd
import pickle
import os.path

# Inspect the pickled timetable database, if it has been generated.
if os.path.isfile('database.p'):
    with open('database.p', 'rb') as handle:
        store = pickle.load(handle)
    print(store.keys())
    # Course codes containing 'CZ'.
    print([course for course in store.keys() if 'CZ' in course])
    print(store['HE9091'])
|
[
"[email protected]"
] | |
9d2088838424734104abac49d03bc31bad104416
|
ca48bab2e2ffca8bb351050791f3b94bccc886b9
|
/final report/interpreter2.py
|
95afe2a50ddc908c1b87ab7b75229f75451ed525
|
[] |
no_license
|
haaksmash/QUI
|
ff394205bd3c3c089d23c0de66bcc4de6bc4e65b
|
f1cc2b3e999bebc7811598bde0f3ffddba216e65
|
refs/heads/master
| 2020-04-06T03:35:15.499196 | 2011-12-10T09:21:55 | 2011-12-10T09:21:55 | 2,872,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
$ python -i FileModel.py
>>> f = FileModel()
>>> f.size = "really big"
Traceback (most recent call last):
...
...
fields.ValidationError: Could not convert to int: really big
>>> f.size = 100
>>> f.size
100
>>>
|
[
"[email protected]"
] | |
21c63146676fd30217432916e59f7094633339a4
|
1a9852fe468f18e1ac3042c09286ccda000a4135
|
/Specialist Certificate in Data Analytics Essentials/DataCamp/05-Working_with_Dates_and_Times/e23_march_29_throughout_a_decade.py
|
f8a0f897b1922372263e9afbb7bb4c04be5da9a8
|
[] |
no_license
|
sarmabhamidipati/UCD
|
452b2f1e166c1079ec06d78e473730e141f706b2
|
101ca3152207e2fe67cca118923896551d5fee1c
|
refs/heads/master
| 2023-08-14T15:41:24.312859 | 2021-09-22T17:33:01 | 2021-09-22T17:33:01 | 386,592,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 739 |
py
|
"""
For example, in the United Kingdom, as of the time this lesson was written, Daylight Saving begins on the last Sunday
in March. Let's look at the UTC offset for March 29, at midnight, for the years 2000 to 2010.

Using tz, set the timezone for dt to be 'Europe/London'.
Within the for loop:
Use the .replace() method to change the year for dt to be y.
Call .isoformat() on the result to observe the results.
"""
# Import datetime and tz
from datetime import datetime
from dateutil import tz

# Midnight on March 29, 2000, in the Europe/London timezone.
dt = datetime(2000, 3, 29, tzinfo=tz.gettz('Europe/London'))

# Walk the same date through 2000-2010 and print each ISO timestamp.
for year in range(2000, 2011):
    shifted = dt.replace(year=year)
    print(shifted.isoformat())
|
[
"[email protected]"
] | |
fa469309fe18cbe3e77032ace895be4cfa02963f
|
aa7049506e929693941436f93e22b13ff3122650
|
/clubs/migrations/0002_club_club_picture.py
|
95e50125132ee8e4eda71cd2d4fd2b4b1f9cfa77
|
[] |
no_license
|
austinbrovick/bellevue_college_hackathon
|
24aa5f1ef64c4a4b85dd50e1f6dd628be15f3817
|
2ad9fa6c5ea79e8a34d55df8e21838aeb8fd044f
|
refs/heads/master
| 2021-05-31T16:08:32.770057 | 2016-05-21T16:54:46 | 2016-05-21T16:54:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-20 07:24
from __future__ import unicode_literals

import clubs.models
from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds an optional image field ``club_picture`` to the ``Club`` model.

    dependencies = [
        ('clubs', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='club',
            name='club_picture',
            # blank=True/null=True make the picture optional;
            # ``upload_location`` is a custom upload-path callable defined in
            # clubs.models.
            field=models.ImageField(blank=True, null=True, upload_to=clubs.models.upload_location),
        ),
    ]
|
[
"[email protected]"
] | |
9475e978727f421d6640b6c19aa2463bef419be8
|
e9e717e8dd8d05ccf39170492721559076312a50
|
/{{ cookiecutter.repo_name }}/src/transform.py
|
37973295657a02ee98c67018a41f22b4433f8016
|
[
"MIT"
] |
permissive
|
alexkyllo/workbench-py
|
bf9ca182eb86ddfb828887ee459a63212373c79d
|
c0f56450a416fda6905b2f8ee087d414bcc0dd95
|
refs/heads/master
| 2022-12-08T12:02:01.038914 | 2020-09-04T05:28:33 | 2020-09-04T05:28:33 | 291,903,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,407 |
py
|
"""transform.py

Fit a transformer on training data and use the serialized transformer
to transform training and test data.
"""
import os
import logging

import dotenv
import click
import joblib
from sklearn import preprocessing, impute, pipeline, compose


def _build_pipeline():
    """Build the default preprocessing pipeline: median-impute, then scale.

    TODO: extend with compose.ColumnTransformer once the column schema of
    the input data is known.
    """
    return pipeline.Pipeline(
        [
            ("impute", impute.SimpleImputer(strategy="median")),
            ("scale", preprocessing.StandardScaler()),
        ]
    )


@click.command()
@click.argument("input_file", type=click.Path(exists=True))
# BUG FIX: click.Path must be instantiated; passing the class itself is a
# click usage error.
@click.argument("output_file", type=click.Path())
# BUG FIX: option names must start with '--'; the original bare
# "pipeline_file" declaration is rejected by click.
@click.option("--pipeline-file", "pipeline_file", type=click.Path())
@click.option("--fit/--no-fit", default=False, help="Fit the transformer")
def transform(input_file, output_file, pipeline_file, fit):
    """
    Transform INPUT_FILE to OUTPUT_FILE using serialized PIPELINE_FILE.

    If --fit specified, a pipeline is created, fitted on the data,
    and written to PIPELINE_FILE.

    Otherwise, a pipeline is read from PIPELINE_FILE and used to transform
    the data only.
    """
    logger = logging.getLogger(__name__)
    logger.info("Reading %s", input_file)
    if fit:
        # Create a fresh pipeline and persist it.
        transformer = _build_pipeline()
        # TODO: load input_file, call transformer.fit_transform on it, and
        # write the transformed result to output_file.
        # BUG FIX: the original dumped the imported sklearn ``pipeline``
        # *module* (which joblib cannot pickle), not an estimator instance.
        joblib.dump(transformer, pipeline_file)
    else:
        # Reuse a previously fitted pipeline.
        transformer = joblib.load(pipeline_file)
        # TODO: load input_file, call transformer.transform on it, and write
        # the transformed result to output_file.


def main():
    """Configure logging and environment variables, then run the CLI."""
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    dotenv.load_dotenv(dotenv.find_dotenv())
    transform()


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
4240df00eb5010e26f95c087f229324170c9f756
|
18a6b272d4c55b24d9c179ae1e58959674e53afe
|
/tf_rl/examples/Sutton_RL_Intro/ch4_DP/value_iteration.py
|
60e7e8461ec89060fd9007c1bb8e4dffbb0be478
|
[
"MIT"
] |
permissive
|
Rowing0914/TF2_RL
|
6cce916f409b3d4ef2a5a40a0611908f20d08b2c
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
refs/heads/master
| 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 |
MIT
| 2022-12-08T07:02:42 | 2020-01-12T23:53:48 |
Python
|
UTF-8
|
Python
| false | false | 854 |
py
|
# Following the algo in section 4.4 Value Iteration
from policy_evaluation import Policy_Evaluation
import sys
import numpy as np

if "../" not in sys.path:
    sys.path.append("../")
from utils.envs.grid_world import GridworldEnv


def Value_Iteration(env, policy, state_value, gamma, theta):
    """Evaluates the policy, then one-hots each state's chosen action.

    Args:
        env: Environment exposing ``nS`` (state count) and ``nA`` (action
            count), e.g. GridworldEnv.
        policy: np.ndarray of shape (nS, nA), per-state action probabilities.
        state_value: np.ndarray with nS entries, initial state values.
        gamma: float, discount factor.
        theta: float, convergence threshold forwarded to Policy_Evaluation.

    Returns:
        The (mutated) policy as one-hot action distributions per state.
    """
    # Evaluate the current policy; flatten in case a 2-D grid is returned.
    state_value = Policy_Evaluation(env, policy, state_value, gamma, theta).flatten()
    # NOTE(review): this argmaxes the *current* policy's probabilities rather
    # than a one-step lookahead over state_value, so it only sharpens the
    # existing policy to one-hot. Confirm against Sutton & Barto section 4.4
    # whether a greedy improvement over Q(s, a) was intended here.
    for s in range(env.nS):
        policy[s] = np.eye(env.nA)[np.argmax(policy[s])]
    return (policy)


if __name__ == '__main__':
    env = GridworldEnv()
    state_value = np.zeros(env.nS)  # V(s) initialised to zero
    policy = np.ones([env.nS, env.nA]) / env.nA  # uniform random policy
    gamma = 1
    theta = 0.00001
    print("===== Training Started =====")
    policy = Value_Iteration(env, policy, state_value, gamma, theta)
    print("===== Training Finished =====")
    print(policy)
    print(state_value)
|
[
"[email protected]"
] | |
ae317d3819b06f5de71f3da6f88fc4df21141864
|
b593247a2bf162819eea6820b6a25c7a659d2f76
|
/Unit 07 Lists and Functions/01 Lists and Functions/1 List Recap/4-Removing elements from lists.py
|
8f667b9e1e80ddb0e19190278409ab25d8eb16c0
|
[] |
no_license
|
Angelpacman/codecademy-py3
|
d4d727857a8894fec5dd3d78c00f3f25f31979dc
|
729d232a8732e53bdf0131246b043354ed933614
|
refs/heads/master
| 2020-03-28T02:50:31.431167 | 2019-01-26T01:07:01 | 2019-01-26T01:07:01 | 147,601,355 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
n = [1, 3, 5]

# remove(value) deletes the first occurrence of the VALUE itself,
# not the element at that index.
n.remove(1)

# del drops the element at the given index without returning it.
del(n[0])

# pop(index) drops the element at the index AND returns it to the caller.
n.pop(0)

print(n)
|
[
"[email protected]"
] | |
6cf0154e33520dc042d50a3f03c9ef013abaeca8
|
1e5c6f4b08d9470fce248cf39e6dccce40e90a41
|
/codes/11/vpython_mouse.py
|
1dfa5be57b729a6fc2531903cb36ec3f2576e212
|
[] |
no_license
|
misaiya99/scipybook2
|
1529cfb7f800df2ef7ce024a86281af16e343a37
|
734ba177b4705cc25da695d42a8cbada7cd22bd9
|
refs/heads/master
| 2020-03-10T21:26:23.595494 | 2017-08-25T09:48:07 | 2017-08-25T09:48:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 748 |
py
|
# -*- coding: utf-8 -*-
# VPython demo: continuously displays scene.mouse attributes on a label,
# points an arrow along the current mouse ray, and prints queued mouse events.
from visual import *

text = label(pos=(0, -2, 0))        # on-screen readout of the mouse state
sphere(pos=(0,2,0))
box(pos = (2, 0, 0))
ray = arrow(pos=(0,0,0), color=(1,0,0))  # visualises scene.mouse.ray
while True:
    rate(30)  # cap the loop at 30 iterations per second
    texts = []
    # Collect a "name=value" line for each interesting mouse attribute.
    for attrname in ["pos", "pick", "pickpos", "camera", "ray"]:
        texts.append("%s=%s" % (attrname, getattr(scene.mouse, attrname)))
    texts.append("project=%s" %
        scene.mouse.project(normal=scene.forward, point=scene.center))
    text.text = "\n".join(texts)
    ray.axis = scene.mouse.ray
    # Drain one queued mouse event per frame, if any, and report it.
    if scene.mouse.events > 0:
        event = scene.mouse.getevent()
        print(("press=%s, click=%s, drag=%s, drop=%s, release=%s" % (
            event.press, event.click, event.drag, event.drop, event.release
        )))
|
[
"[email protected]"
] | |
4d53d7f73ebb9720864f89da0c2327cfa136e2c2
|
54ddb3f38cd09ac25213a7eb8743376fe778fee8
|
/topic_05_data_structure/practice/zip_1_common.py
|
4236c882354be343b387f643ccd4d9be6d9b4296
|
[] |
no_license
|
ryndovaira/leveluppythonlevel1_300321
|
dbfd4ee41485870097ee490f652751776ccbd7ab
|
0877226e6fdb8945531775c42193a90ddb9c8a8b
|
refs/heads/master
| 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,648 |
py
|
"""
Функция zip_common.
Принимает 3 аргумента: список, строку и кортеж.
Возвращает список (list) с тройками значений из каждого аргумента.
ВНИМАНИЕ: для строки один элемент = один символ
(Порядок проверки именно такой:)
Если вместо list передано что-то другое, то возвращать строку 'First arg must be list!'.
Если вместо str передано что-то другое, то возвращать строку 'Second arg must be str!'.
Если вместо tuple передано что-то другое, то возвращать строку 'Third arg must be tuple!'.
Если list пуст, то возвращать строку 'Empty list!'.
Если str пуст, то возвращать строку 'Empty str!'.
Если tuple пуст, то возвращать строку 'Empty tuple!'.
Если list, str и tuple различного размера, обрезаем до минимального (стандартный zip).
"""
def zip_common(my_list, my_str, my_tuple):
    """Zip a list, a string, and a tuple into a list of triples.

    Argument types are validated first (list, then str, then tuple), then
    emptiness is checked in the same order; the first violation returns the
    corresponding error message string. One string element = one character.
    Inputs of unequal length are truncated to the shortest (standard zip).
    """
    checks = (
        (my_list, list, 'First arg must be list!', 'Empty list!'),
        (my_str, str, 'Second arg must be str!', 'Empty str!'),
        (my_tuple, tuple, 'Third arg must be tuple!', 'Empty tuple!'),
    )
    # Exact type checks (deliberately not isinstance: subclasses are rejected).
    for value, expected, wrong_type_msg, _ in checks:
        if type(value) != expected:
            return wrong_type_msg
    # Emptiness checks, in the same argument order.
    for value, _, _, empty_msg in checks:
        if not value:
            return empty_msg
    return list(zip(my_list, my_str, my_tuple))
|
[
"[email protected]"
] | |
6856e89aa1d898a889e5af7dae23b5576017b49c
|
292cec77b5003a2f80360d0aee77556d12d990f7
|
/typings/filetype/types/video.pyi
|
9b61193428fe1cf6d3aee36815c69fc32f0d96e0
|
[
"Apache-2.0"
] |
permissive
|
yubozhao/BentoML
|
194a6ec804cc1c6dbe7930c49948b6707cbc3c5f
|
d4bb5cbb90f9a8ad162a417103433b9c33b39c84
|
refs/heads/master
| 2022-12-17T00:18:55.555897 | 2022-12-06T00:11:39 | 2022-12-06T00:11:39 | 178,978,385 | 3 | 0 |
Apache-2.0
| 2020-12-01T18:17:15 | 2019-04-02T01:53:53 |
Python
|
UTF-8
|
Python
| false | false | 2,296 |
pyi
|
"""
This type stub file was generated by pyright.
"""
from .base import Type
from .isobmff import IsoBmff

# NOTE(review): auto-generated type stub. Concrete MIME strings, file
# extensions, and matcher logic are elided to ``...`` by the generator; see
# the runtime ``filetype.types.video`` module for the actual values.


class Mp4(IsoBmff):
    """
    Implements the MP4 video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> bool:
        ...


class M4v(Type):
    """
    Implements the M4V video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class Mkv(Type):
    """
    Implements the MKV video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf):
        ...


class Webm(Type):
    """
    Implements the WebM video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf):
        ...


class Mov(IsoBmff):
    """
    Implements the MOV video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class Avi(Type):
    """
    Implements the AVI video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class Wmv(Type):
    """
    Implements the WMV video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class Flv(Type):
    """
    Implements the FLV video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class Mpeg(Type):
    """
    Implements the MPEG video type matcher.
    """
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf): # -> Literal[False]:
        ...


class M3gp(Type):
    """Implements the 3gp image type matcher."""
    MIME = ...
    EXTENSION = ...
    def __init__(self) -> None:
        ...

    def match(self, buf):
        ...
|
[
"[email protected]"
] | |
9b750390731edd5a1a683067240907563877df45
|
7a66ff970580297ba50b0d4bdd0406352071c05a
|
/Pyscience/3. numpy.py
|
5662327a3911c27438e44e19446518f84358e67d
|
[] |
no_license
|
zero-big/Python-Basic
|
1ab3da9d09983d937b410ca9ec1741424ebaa3ae
|
5cd2eaa822aedb46a79283a6007b900a3c9665c8
|
refs/heads/master
| 2023-08-03T13:10:22.556732 | 2021-09-24T11:35:50 | 2021-09-24T11:35:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,391 |
py
|
import numpy as np

# 1. Creating arrays: array
b = np.array([2, 4, 6, 8])
print(b)  # [2 4 6 8]

# ndim: returns the rank (number of dimensions)
print(b.ndim)  # 1
# size: returns the total number of values in the array
print(b.size)  # 4
# shape: returns the number of values along each dimension
print(b.shape)  # (4,)

a = np.arange(10)
print(a)  # [0 1 2 3 4 5 6 7 8 9]
print(a.ndim)  # 1
print(a.shape)  # (10,)
print(a.size)  # 10

a = np.arange(7, 11)
print(a)  # [ 7  8  9 10]

f = np.arange(2.0, 9.8, 0.3)
print(f)
# [2.  2.3 2.6 2.9 3.2 3.5 3.8 4.1 4.4 4.7 5.  5.3 5.6 5.9 6.2 6.5 6.8 7.1
#  7.4 7.7 8.  8.3 8.6 8.9 9.2 9.5 9.8]

# BUG FIX: np.float was a deprecated alias of the builtin float and was
# removed in NumPy 1.24; use float (or np.float64) instead.
g = np.arange(10, 4, -1.5, dtype=float)
print(g)  # [10.   8.5  7.   5.5]

a = np.zeros((3,))
print(a)  # [0. 0. 0.]
print(a.ndim)  # 1
print(a.shape)  # (3,)
print(a.size)  # 3

b = np.zeros((2, 4))
print(b)
# [[0. 0. 0. 0.]
#  [0. 0. 0. 0.]]
print(b.ndim)  # 2
print(b.shape)  # (2, 4)
print(b.size)  # 8

k = np.ones((3, 5))
print(k)
# [[1. 1. 1. 1. 1.]
#  [1. 1. 1. 1. 1.]
#  [1. 1. 1. 1. 1.]]

m = np.random.random((3, 5))
print(m)  # 3x5 array of uniform random values in [0, 1)

a = np.arange(10)
a = a.reshape(2, 5)
print(a)
# [[0 1 2 3 4]
#  [5 6 7 8 9]]
print(a.ndim)  # 2
print(a.shape)  # (2, 5)
print(a.size)  # 10

a = a.reshape(5, 2)
print(a)
# [[0 1]
#  [2 3]
#  [4 5]
#  [6 7]
#  [8 9]]
print(a.ndim)  # 2
print(a.shape)  # (5, 2)
print(a.size)  # 10

a.shape = (2, 5)
print(a)

# Array arithmetic
# BUG FIX: replaced `from numpy import *` (which shadows builtins such as
# sum and abs for the rest of the script) with explicit np. calls.
a = np.arange(4)
a *= 3
print(a)  # [0 3 6 9]

plain_list = list(range(4))
print(plain_list)  # [0, 1, 2, 3]
plain_list = [num * 3 for num in plain_list]
print(plain_list)  # [0, 3, 6, 9]

a = np.zeros((2, 5)) + 17.0
print(a)
# [[17. 17. 17. 17. 17.]
#  [17. 17. 17. 17. 17.]]

# @ : matrix multiplication
a = np.array([[1, 2], [3, 4]])
b = a @ a
print(b)
# [[ 7 10]
#  [15 22]]

# Linear algebra
# 4x + 5y = 20
# x + 2y = 13
coefficients = np.array([[4, 5], [1, 2]])
dependents = np.array([20, 13])
answer = np.linalg.solve(coefficients, dependents)
print(answer)
# [-8.33333333 10.66666667]
print(4 * answer[0] + 5 * answer[1])  # 20.0
print(1 * answer[0] + 2 * answer[1])  # 13.0

product = np.dot(coefficients, answer)
print(product)  # [20. 13.]
print(np.allclose(product, dependents))  # True
|
[
"[email protected]"
] | |
4bc2b97cfdf5ecd84e54794669f4b1629022175a
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/utils/insertOrder.py
|
85952c6096e1e1cff45f6714581d1c7d9b599c2b
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,208 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Python 2 script (uses `print` statements): manual tests against the XTP
# trading API for order insertion and paged order/trade queries.
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from QueryStkPriceQty import *
import time

# Shared with the page callbacks below: `a` records the running count after
# each page, `i` counts records carrying an 'order_cancel_xtp_id' field.
a = []
i = 0


def insertOrder(order_client_id):
    # Builds and submits one cash buy order for ticker 999999 (Shenzhen
    # market) at the limit-up price, tagged with the given client order id.
    case_goal = {
        'case_ID': 'ATC-103-19',
        '期望状态': '全成',
        'errorID': 0,
        'errorMSG': '',
        '是否生成报单': '是',
        '是否是撤废': '否',
        'xtp_ID': 0,
        'cancel_xtpID': 0,
    }
    stkparm = QueryStkPriceQty('999999', '2', '0', '2', '0', 'B', case_goal['期望状态'], Api)
    wt_reqs = {
        'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
        'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
        'ticker': stkparm['证券代码'],
        'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
        'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
        'price': stkparm['涨停价'],
        'quantity': 200
    }
    wt_reqs['order_client_id'] = order_client_id
    Api.trade.InsertOrder(wt_reqs)


# Paged order query test.
# NOTE(review): takes `self` and calls self.assertEqual but is defined at
# module level — it only works when bound into a TestCase; confirm intent.
def test_orderpage(self):
    def pagedate(data, req_count, order_sequence, query_reference, request_id, is_last):
        # print data,is_last
        global i
        for k in data.keys():
            if 'order_cancel_xtp_id' in k:
                i += 1
        a.append(i)
    Api.trade.setQueryOrderByPageHandle(pagedate)
    Api.trade.QueryOrdersByPage({'req_count': 13, 'reference': 198})
    time.sleep(0.5)  # wait for the async page callback to fire
    rs = a[-1]
    self.assertEqual(rs, 3)


# Paged trade query test: dumps each received page to stdout.
def test_tradepage():
    def pagedate(data, req_count, trade_sequence, query_reference, request_id, is_last):
        print data, is_last
    Api.trade.setQueryTradeByPageHandle(pagedate)
    Api.trade.QueryTradesByPage({'req_count': 10, 'reference': 0})
    time.sleep(0.5)  # wait for the async page callback to fire


if __name__ == '__main__':
    '''
    for i in range(100):
        order_client_id = i+1
        #print order_client_id
        Api.trade.Login()
        insertOrder(order_client_id)
    '''
    #test_orderpage()
    test_tradepage()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.