| Column | Dtype | Observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
a14162e5b6a13393930258a65f239c5258f998cc
|
68eb441faf3f9415fbcbc8330f9b01ad6933bede
|
/ebook/machinelearningdemo/MachineLearningDemo/python_day03/Demo02_Dataframe.py
|
05cda830db9ce800c5ec102716c80ec285d7354b
|
[] |
no_license
|
OrriO/jupyter_myworkspace
|
fb8e97865f15abe2fb3aa01985fdb4f34317f15f
|
a592ab92f38a1cd466c454bb36fd0002c75202a9
|
refs/heads/master
| 2023-06-01T02:00:36.986439 | 2021-07-08T13:44:26 | 2021-07-08T13:44:26 | 381,997,768 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/5 9:18
# @Author : Z
# @Email : S
# @File : Demo02_DataFrame.py
import pandas as pd
df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(df1)
print("hello git")
# Added modified content
### 23:26
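# Expected output (added note): df1 prints as a 3x3 DataFrame with the
# default integer row/column labels:
#    0  1  2
# 0  1  2  3
# 1  4  5  6
# 2  7  8  9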
|
[
"[email protected]"
] | |
0fd66913e4d6500654e1346a6071f0e86c16f33c
|
7b870523b8e432384cff27fd50056da8c6a5b1e3
|
/leetcode/083删除排序链表中的重复元素.py
|
f8238ba6c934f92a34234fd778fd7e26f68308a3
|
[] |
no_license
|
ShawDa/Coding
|
93e198acdda528da608c62ca5b9e29bb0fb9e060
|
b8ec1350e904665f1375c29a53f443ecf262d723
|
refs/heads/master
| 2020-03-25T09:20:08.767177 | 2019-09-01T06:25:10 | 2019-09-01T06:25:10 | 143,660,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 534 |
py
|
# -*- coding:utf-8 -*-
__author__ = 'ShawDa'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteDuplicates(self, head: 'ListNode') -> 'ListNode':
if not head or not head.next:
return head
node = head
while node and node.next:
if node.val != node.next.val:
node = node.next
else:
node.next = node.next.next
return head
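# Quick self-check (added sketch, not part of the original submission):
# LeetCode normally supplies ListNode, so a minimal stand-in is defined
# here to exercise the solution locally.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
def build_list(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head
if __name__ == '__main__':
    node = Solution().deleteDuplicates(build_list([1, 1, 2, 3, 3]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [1, 2, 3]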
|
[
"[email protected]"
] | |
948b205c20464b1078bce436cc5f9b6691697db2
|
8ec910de801b424540abb4e6e955838a287663b6
|
/CursoPython/Unidad10/Ejemplos/archivo_writable.py
|
0e2f86d2c2f512256f01be061874c555b02eacdf
|
[] |
no_license
|
hector81/Aprendiendo_Python
|
f4f211ace32d334fb6b495b1b8b449d83a7f0bf8
|
9c73f32b0c82f08e964472af1923f66c0fbb4c22
|
refs/heads/master
| 2022-12-28T03:41:20.378415 | 2020-09-28T09:15:03 | 2020-09-28T09:15:03 | 265,689,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 90 |
py
|
from io import open
f = open("Unidad10\\Ejemplos\\archivo.txt", "a")
print(f.writable())
|
[
"[email protected]"
] | |
f8e8d4ca4ac680377c3eb67bf278605933850dba
|
c289ba5d699ce015c7f7b7176621057bb8dfe1fa
|
/Python/algorithm/hanoi_tower/하노이의탑.py
|
bec0986a931b07758de7dcad219cfe58a5898788
|
[] |
no_license
|
sug5806/Upload
|
abd44985281b7aeadc39c654db099451a285a360
|
c7e8ca38b04bdf2076862226cceaeff5ff3cfbc6
|
refs/heads/master
| 2020-05-03T00:48:53.763042 | 2019-04-08T10:00:11 | 2019-04-08T10:00:11 | 178,318,964 | 0 | 0 | null | 2019-04-01T02:03:14 | 2019-03-29T02:37:45 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 314 |
py
|
n = 0
a = "A" # 출발
b = "B" # 중간
c = "C" # 도착
def hanoi(n, a, b, c):
if n == 1:
print("{}번째 원반을 {}로 이동".format(n, c))
return
hanoi(n-1, a, c, b)
print("{}번째 원반을 {}로 이동".format(n, c))
hanoi(n-1, b, a, c)
hanoi(3, a, b, c)
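# Added check (not in the original): hanoi() prints 2**n - 1 moves in
# total, so the hanoi(3, a, b, c) call above produces 7 lines.
def count_moves(n):
    return 1 if n == 1 else 2 * count_moves(n - 1) + 1
assert count_moves(3) == 2 ** 3 - 1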
|
[
"[email protected]"
] | |
26fccac5a29754168d7ea75db7369492bf854d46
|
e669b3fe7da2698da4ce02e98325ce154d2aa546
|
/swaps/model/wallet/withdraw.py
|
4baa2eb3a0cc154ae15f13a3bd987a77ee669be8
|
[
"Apache-2.0"
] |
permissive
|
marcellinamichie291/cash_carry_leveraged_futures_arbitrageur
|
0834a911fdd6c9f1462f6f2f59926f715fc51461
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
refs/heads/main
| 2023-03-16T18:35:28.730554 | 2020-12-04T07:46:13 | 2020-12-04T07:46:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,040 |
py
|
from swaps.constant import *
class Withdraw:
"""
The latest status for withdraws.
:member
id: The transfer id.
currency: The crypto currency to deposit.
tx_hash: The on-chain transaction hash.
amount: The number of crypto asset transferred in its minimum unit.
address: The deposit source address.
address_tag: The user defined address tag.
fee: The amount of fee taken by Huobi in this crypto's minimum unit.
created_at: The UNIX formatted timestamp in UTC for the transfer creation.
updated_at: The UNIX formatted timestamp in UTC for the transfer's latest update.
state: The withdraw state of this transfer.
"""
def __init__(self):
self.id = 0
self.type = DepositWithdraw.WITHDRAW
self.currency = ""
self.chain = ""
self.tx_hash = ""
self.amount = 0.0
self.address = ""
self.address_tag = ""
self.fee = 0.0
self.created_at = 0
self.updated_at = 0
self.state = WithdrawState.INVALID
def print_object(self, format_data=""):
from swaps.utils.print_mix_object import PrintBasic
PrintBasic.print_basic(self.id, format_data + "ID")
PrintBasic.print_basic(self.currency, format_data + "Currency")
PrintBasic.print_basic(self.type, format_data + "Operator Type")
PrintBasic.print_basic(self.chain, format_data + "Chain")
PrintBasic.print_basic(self.tx_hash, format_data + "Trade Hash")
PrintBasic.print_basic(self.amount, format_data + "Amount")
PrintBasic.print_basic(self.address, format_data + "Address")
PrintBasic.print_basic(self.address_tag, format_data + "Address Tag")
PrintBasic.print_basic(self.fee, format_data + "Fee")
PrintBasic.print_basic(self.state, format_data + "Withdraw State")
PrintBasic.print_basic(self.created_at, format_data + "Create Time")
PrintBasic.print_basic(self.updated_at, format_data + "Update Time")
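# Hedged usage sketch (added; not part of the original file). Assuming the
# `swaps` package is importable, a Withdraw record is filled in by the
# client layer and dumped one labeled line per field:
#     w = Withdraw()
#     w.currency = "btc"
#     w.amount = 0.5
#     w.print_object(format_data="  ")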
|
[
"[email protected]"
] | |
51f4f5dc31c811838a7775de5000e66f98945f1b
|
1b01dec8c454337232a6cf1046412ec98269fe5d
|
/examples/sync_example.py
|
e60390356bcbce9b05c4461bb86460e1f002efa9
|
[
"BSD-3-Clause"
] |
permissive
|
lietu/shylock
|
d38710220306af1e4fac638b2d24df8a8fdc3801
|
5ada3cb4bf75e2395fadb19f68ceff5ff92e6a65
|
refs/heads/master
| 2023-08-09T06:58:47.653545 | 2023-03-05T16:27:30 | 2023-03-05T16:27:30 | 243,951,853 | 5 | 8 |
NOASSERTION
| 2023-07-25T21:10:23 | 2020-02-29T10:55:05 |
Python
|
UTF-8
|
Python
| false | false | 1,196 |
py
|
from time import time
from pymongo import MongoClient
from shylock import Lock, ShylockPymongoBackend, configure
from shylock.backends.pymongo import DOCUMENT_TTL
CONNECTION_STRING = "mongodb://localhost:27017"
def main():
print("Start")
c = MongoClient(CONNECTION_STRING)
configure(ShylockPymongoBackend.create(c, "shylock_test", "shylock"))
lock_name = "test-lock"
test_lock = Lock(lock_name)
try:
with Lock(lock_name):
print("Got lock")
print("Testing re-lock")
assert not test_lock.acquire(False)
raise ValueError()
except ValueError:
print("Caught exception, lock should be released")
assert test_lock.acquire(False)
test_lock.release()
print(
f"Testing automatic release, this will take a while (~{DOCUMENT_TTL}-{DOCUMENT_TTL+60}s)."
)
# Test automatic release
start = time()
with test_lock:
lock2 = Lock(lock_name)
try:
lock2.acquire()
released = time() - start
finally:
lock2.release()
print(f"Lock automatically released after {released:.3f}s")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
efe2fd91d27dab3e24d2bc319c21afca1f2a83e6
|
c309e7d19af94ebcb537f1e8655c0122dbe0cb13
|
/Chapter03/01-chapter-content/argparse_positional_arguments.py
|
41c0ebbfbbc78a0e5b576c61eab36691762b9e38
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-OpenCV-4-with-Python
|
0fb82c88cb7205c7050c8db9f95a6deb3b1b3333
|
4194aea6f925a4b39114aaff8463be4d18e73aba
|
refs/heads/master
| 2023-03-07T04:51:16.071143 | 2023-02-13T10:17:48 | 2023-02-13T10:17:48 | 151,057,527 | 375 | 226 |
MIT
| 2022-08-27T13:32:19 | 2018-10-01T08:27:29 |
Python
|
UTF-8
|
Python
| false | false | 784 |
py
|
"""
Example to introduce argparse with a positional argument
"""
# Import the required packages
import argparse
# We first create the ArgumentParser object
# The created object 'parser' will have the necessary information
# to parse the command-line arguments into data types.
parser = argparse.ArgumentParser()
# We add a positional argument using add_argument() including a help
parser.add_argument("first_argument", help="this is the string text in connection with first_argument")
# The information about program arguments is stored in 'parser'
# Then, it is used when the parser calls parse_args().
# ArgumentParser parses arguments through the parse_args() method:
args = parser.parse_args()
# We get and print the first argument of this script:
print(args.first_argument)
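# Example invocation (added note):
#   $ python argparse_positional_arguments.py hello
#   hello
# Omitting the argument makes argparse print a usage error and exit.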
|
[
"[email protected]"
] | |
21ef6f2b8e9d65d3486b9995043fba33e64b7ee2
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_205/ch38_2020_03_17_21_32_21_644302.py
|
2e02b97d73747423301a36c3f31f3d55c3ad3ac1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
def quantos_uns(x):
    # counts how many digits of x are equal to "1" (the original referenced
    # an undefined name and compared an int against a string)
    n = 0
    x_str = str(x)
    for digito in x_str:
        if digito == "1":
            n += 1
    return n
|
[
"[email protected]"
] | |
a7e8541c94ddc02f2d6400690083c360a1be0115
|
e9fa62b4173b83ec0804329e7821a58624d3eb9f
|
/core/nodes/groupNode.py
|
c58a9a427c2692759591226e641652614218b466
|
[] |
no_license
|
UIKit0/meShaderEd
|
af5f0c4dc524b8be12ec6336a86c28439f1f3b1a
|
683756f9c66d4613d89afdff4d1d014488b199f7
|
refs/heads/master
| 2021-01-17T21:16:27.037070 | 2013-12-26T12:30:26 | 2013-12-26T12:30:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,420 |
py
|
#===============================================================================
# groupNode.py
#===============================================================================
import os, sys
from PyQt4 import QtCore
from core.node import Node
from core.nodeParam import NodeParam
from core.nodeNetwork import NodeNetwork
from global_vars import app_global_vars, DEBUG_MODE
import gui.ui_settings as UI
#
# GroupNode
#
class GroupNode ( Node ) :
#
# __init__
#
def __init__ ( self, xml_node = None ) :
#
Node.__init__ ( self, xml_node )
if xml_node is None :
self.type = 'nodegroup'
self.name = self.label = self.type
self.nodenet = NodeNetwork ()
if DEBUG_MODE : print '>> GroupNode( %s ).__init__' % self.label
#
# copy
#
def copy ( self ) :
        if DEBUG_MODE : print '>> GroupNode( %s ).copy' % self.label
newNode = GroupNode ()
self.copySetup ( newNode )
return newNode
#
# copySetup
#
def copySetup ( self, newNode ) :
#
        if DEBUG_MODE : print '>> GroupNode( %s ).copySetup ' % self.label
Node.copySetup ( self, newNode )
newNode.nodenet = self.nodenet.copy ()
#
# computeNode
#
def computeNode ( self ) :
#
if DEBUG_MODE : print '>> GroupNode( %s ).computeNode' % self.label
        # inside the control code, the imageName value can be assigned from
        # different input parameters
self.execControlCode ()
|
[
"[email protected]"
] | |
35a6990ff5ac68e8cbda235af7ac71de5d442e3d
|
7d56ddd456613b77872598360494edd411366a79
|
/tests/test_is_adjacent.py
|
adf62949215be3e2c359bd244b743528413b6fa4
|
[] |
no_license
|
andrewswan/lwotai
|
3bf6629eaed86c945fdd65f221320bd743dc18ac
|
33c6435ce3f0f85668cc5414cf78c1e9c614f6a9
|
refs/heads/release
| 2021-01-19T10:38:55.939281 | 2017-09-17T03:38:33 | 2017-09-17T03:38:33 | 87,885,218 | 1 | 0 | null | 2017-04-11T03:41:12 | 2017-04-11T03:41:12 | null |
UTF-8
|
Python
| false | false | 620 |
py
|
from labyrinth_test_case import LabyrinthTestCase
from lwotai.labyrinth import Labyrinth
class IsAdjacent(LabyrinthTestCase):
"""Test isAdjacent"""
def test_is_adjacent(self):
app = Labyrinth(1, 1, self.set_up_blank_test_scenario)
self.assertTrue(app.is_adjacent("Iran", "Iraq"))
self.assertTrue(app.is_adjacent("Germany", "Spain"))
self.assertTrue(app.is_adjacent("Libya", "Italy"))
self.assertTrue(app.is_adjacent("Benelux", "Russia"))
self.assertTrue(app.is_adjacent("Lebanon", "France"))
self.assertFalse(app.is_adjacent("United States", "Lebanon"))
|
[
"[email protected]"
] | |
38d4016261da034fd1ad2793b4f25832785265f8
|
2670452749c6299386a33391f9fb5014db0203ec
|
/meraki/aio/api/mg_lan_settings.py
|
e71f434d300a7ebdeef9def563dca0cc0a6c628d
|
[
"MIT"
] |
permissive
|
npappin-wsu/dashboard-api-python
|
f9d3fc682b517e6bac437cd54101afd09b653274
|
5aedfc740f676fbf34e5f79269e8ece73421e3da
|
refs/heads/master
| 2020-06-28T17:49:44.911294 | 2020-04-14T04:27:38 | 2020-04-14T04:27:38 | 255,509,439 | 0 | 0 |
MIT
| 2020-04-14T04:24:55 | 2020-04-14T04:24:54 | null |
UTF-8
|
Python
| false | false | 1,508 |
py
|
class AsyncMGLANSettings:
def __init__(self, session):
super().__init__()
self._session = session
async def getDeviceCellularGatewaySettings(self, serial: str):
"""
**Show the LAN Settings of a MG**
https://api.meraki.com/api_docs#show-the-lan-settings-of-a-mg
- serial (string)
"""
metadata = {
'tags': ['MG LAN settings'],
'operation': 'getDeviceCellularGatewaySettings',
}
resource = f'/devices/{serial}/cellularGateway/settings'
return await self._session.get(metadata, resource)
async def updateDeviceCellularGatewaySettings(self, serial: str, **kwargs):
"""
**Update the LAN Settings for a single MG.**
https://api.meraki.com/api_docs#update-the-lan-settings-for-a-single-mg
- serial (string)
- reservedIpRanges (array): list of all reserved IP ranges for a single MG
- fixedIpAssignments (array): list of all fixed IP assignments for a single MG
"""
kwargs.update(locals())
metadata = {
'tags': ['MG LAN settings'],
'operation': 'updateDeviceCellularGatewaySettings',
}
resource = f'/devices/{serial}/cellularGateway/settings'
body_params = ['reservedIpRanges', 'fixedIpAssignments']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
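# Hedged usage sketch (added; not part of the original module). Assuming the
# surrounding SDK exposes this group on its async dashboard object under a
# name like `mg_lan_settings`, a call would look roughly like:
#     settings = await dashboard.mg_lan_settings.getDeviceCellularGatewaySettings(
#         serial="Q2XX-XXXX-XXXX")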
|
[
"[email protected]"
] | |
78de78847f7852c794ae07579f9696246ae7fe59
|
02bae0ffc4dea8e65cceb5ef49b8ec30ed3ab0fe
|
/week5/count_pairs.py
|
92d77c4f651c0da25c5fb40badf33e4e6fa641f6
|
[] |
no_license
|
Gambrinius/Python_Course
|
030ae41a129aa6a4ded06cc1ed9ca852bcf1c756
|
0d7529fa3dcc012b117241900d9d564d4096208b
|
refs/heads/master
| 2020-03-06T16:01:33.322266 | 2018-06-15T17:49:40 | 2018-06-15T17:49:40 | 126,965,876 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
numList = list(map(int, input().split()))
# print(sum(numList.count(x) - 1 for x in numList) // 2)
counter = 0
for i in range(len(numList)):
for j in range(i + 1, len(numList)):
if numList[i] == numList[j]:
counter += 1
print(counter)
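# Added sketch: an O(n) alternative to the nested loops -- a value that
# occurs k times contributes k*(k-1)//2 equal pairs:
#     from collections import Counter
#     print(sum(k * (k - 1) // 2 for k in Counter(numList).values()))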
|
[
"[email protected]"
] | |
ec38145aaa3b9dba8286dd421d20ebdb7df5390f
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_authorized_db_users_request.py
|
71cbb2d260e258c598a2112f6a4543e44ec3d044
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 6,237 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListAuthorizedDbUsersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'db_name': 'str',
'page': 'int',
'limit': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'db_name': 'db-name',
'page': 'page',
'limit': 'limit'
}
def __init__(self, x_language=None, instance_id=None, db_name=None, page=None, limit=None):
"""ListAuthorizedDbUsersRequest
The model defined in huaweicloud sdk
        :param x_language: Language.
:type x_language: str
        :param instance_id: Instance ID.
:type instance_id: str
        :param db_name: Database name.
:type db_name: str
        :param page: Page number, starting from 1.
:type page: int
        :param limit: Number of records per page. Value range: [1, 100].
:type limit: int
"""
self._x_language = None
self._instance_id = None
self._db_name = None
self._page = None
self._limit = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
self.db_name = db_name
self.page = page
self.limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListAuthorizedDbUsersRequest.
        Language.
:return: The x_language of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListAuthorizedDbUsersRequest.
        Language.
:param x_language: The x_language of this ListAuthorizedDbUsersRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListAuthorizedDbUsersRequest.
        Instance ID.
:return: The instance_id of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAuthorizedDbUsersRequest.
        Instance ID.
:param instance_id: The instance_id of this ListAuthorizedDbUsersRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def db_name(self):
"""Gets the db_name of this ListAuthorizedDbUsersRequest.
        Database name.
:return: The db_name of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this ListAuthorizedDbUsersRequest.
        Database name.
:param db_name: The db_name of this ListAuthorizedDbUsersRequest.
:type db_name: str
"""
self._db_name = db_name
@property
def page(self):
"""Gets the page of this ListAuthorizedDbUsersRequest.
        Page number, starting from 1.
:return: The page of this ListAuthorizedDbUsersRequest.
:rtype: int
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this ListAuthorizedDbUsersRequest.
        Page number, starting from 1.
:param page: The page of this ListAuthorizedDbUsersRequest.
:type page: int
"""
self._page = page
@property
def limit(self):
"""Gets the limit of this ListAuthorizedDbUsersRequest.
        Number of records per page. Value range: [1, 100].
:return: The limit of this ListAuthorizedDbUsersRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAuthorizedDbUsersRequest.
        Number of records per page. Value range: [1, 100].
:param limit: The limit of this ListAuthorizedDbUsersRequest.
:type limit: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAuthorizedDbUsersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
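# Hedged usage sketch (added; not part of the original file):
#     req = ListAuthorizedDbUsersRequest(instance_id="my-instance-id",
#                                        db_name="mydb", page=1, limit=10)
#     print(req.to_dict())   # plain dict of the request fields
#     print(req)             # JSON dump via to_str()/__repr__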
|
[
"[email protected]"
] | |
4d2747bd95007d60e79741f0a86d8d7132ffc21f
|
8ce5ad4d2c6fdf94f34ced61c87e080a54869924
|
/ch5/buy_apple_orange.py
|
fd20dab3d4f17f9ab9957459224cd9f642057ba1
|
[] |
no_license
|
fuchami/zero-deep-learning
|
dabc70841a6ae5b92d9a353a47c10ee0a3d17609
|
de423b143ca071530fa64b3efc18b92387932169
|
refs/heads/master
| 2020-07-03T10:35:37.085132 | 2019-09-06T07:50:40 | 2019-09-06T07:50:40 | 201,879,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 834 |
py
|
# coding:utf-8
from layer_naive import *
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
# layer
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange = AddLayer()
mul_tax_layer = MulLayer()
# forward
apple_price = mul_apple_layer.forward(apple, apple_num)
orange_price = mul_orange_layer.forward(orange, orange_num)
all_price = add_apple_orange.forward(apple_price, orange_price)
price = mul_tax_layer.forward(all_price, tax)
# backward
dprice = 1
dall_price, dtax = mul_tax_layer.backward(dprice)
dapple_price, dorange_price = add_apple_orange.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward(dapple_price)
print(price) # 715
print(dapple_num, dapple, dorange, dorange_num, dtax) # 110, 2.2, 3.3, 165, 650
|
[
"[email protected]"
] | |
ffdd91659d06d727143545bb500513b60ea0f9c5
|
4869f79cedcb8aef7f4d064bb8927ed3595c4c5e
|
/AnonymousGroupLogin/RegisterUser/RegisteringComponent/sleekxmpp/__init__.py
|
1057895dfac385044b7f120be85a8ee7c23a037c
|
[] |
no_license
|
mpetyx/xmpp-padgets-development
|
622fef069e2b8f6beb15296b0d3fdd554d13535f
|
a0ca9ed2dd513f83ebb8cb4f4836708c82975713
|
refs/heads/master
| 2021-01-25T07:34:33.869597 | 2012-03-27T12:45:40 | 2012-03-27T12:45:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.basexmpp import BaseXMPP
from sleekxmpp.clientxmpp import ClientXMPP
from sleekxmpp.componentxmpp import ComponentXMPP
from sleekxmpp.stanza import Message, Presence, Iq
from sleekxmpp.xmlstream.handler import *
from sleekxmpp.xmlstream import XMLStream, RestartStream
from sleekxmpp.xmlstream.matcher import *
from sleekxmpp.xmlstream.stanzabase import StanzaBase, ET
from sleekxmpp.version import __version__, __version_info__
print "olo customies kanw! "
|
[
"[email protected]"
] | |
588015cc847c31ca5a2a70db1437035557d1592b
|
1284718203be50b23dcd1f6159746cfa42a04163
|
/tensorflow_data/sawyer/online_data1_fine/conf.py
|
27d247ce2a2ed6774e7a2e912beab6b918db7255
|
[] |
no_license
|
febert/robustness_via_retrying
|
8fe4106d7705228ff339f9643518a80c0a243d36
|
1def282dc22f24b72c51ff1ef9ea1a7a83291369
|
refs/heads/master
| 2020-03-31T19:33:39.664525 | 2018-11-07T21:52:56 | 2018-11-07T21:52:56 | 152,502,702 | 17 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,367 |
py
|
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
# tf record data location:
DATA_BASE_DIR = '/'.join(str.split(current_dir, '/')[:-3]) + '/pushing_data'
BASE_DIR = '/'.join(str.split(current_dir, '/')[:-3])
# local output directory
OUT_DIR = current_dir + '/modeldata'
from python_visual_mpc.video_prediction.basecls.prediction_model_basecls import Base_Prediction_Model
configuration = {
'experiment_name': 'sna',
'pred_model': Base_Prediction_Model,
# 'test_data_dir': TEST_DATA_DIR, # 'directory containing data.' ,
'output_dir': OUT_DIR, #'directory for model checkpoints.' ,
'current_dir': current_dir, #'directory for writing summary.' ,
'num_iterations': 200000, #'number of training iterations.' ,
'resume_pretrained': '', # 'filepath of a pretrained model to resume training from.' ,
'data_dir':[DATA_BASE_DIR+ '/weiss_gripper_20k/train',DATA_BASE_DIR + '/online_data1/train'],
'test_data_ind':1,
'load_pretrained':BASE_DIR + '/tensorflow_data/sawyer/weissgripper_basecls_20k/modeldata/model96002',
'sequence_length': 14, # 'sequence length to load, including context frames.' ,
'skip_frame': 1, # 'use ever i-th frame to increase prediction horizon' ,
'context_frames': 2, # of frames before predictions.' ,
'use_state': 1, #'Whether or not to give the state+action to the model' ,
'model': 'CDNA', #'model architecture to use - CDNA, DNA, or STP' ,
'num_masks': 10, # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
'schedsamp_k': 900.0, # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
'train_val_split': 0.95, #'The percentage of files to use for the training set vs. the validation set.' ,
'batch_size': 32, #'batch size for training' ,
'learning_rate': 0.001, #'the base learning rate of the generator' ,
'visualize': '', #'load model from which to generate visualizations
'file_visual': '', # datafile used for making visualizations
'kern_size': 9, #size of DNA kerns
'sawyer':'',
'single_view':"",
'use_len':14, # number of steps used for training where the starting location is selected randomly within sequencelength
'1stimg_bckgd':'',
# 'visual_flowvec':'',
'adim':5,
'sdim':4,
'img_height':56,
'img_width':64,
'color_augmentation':"",
}
|
[
"[email protected]"
] | |
126b9836584a362317e832bf74cc2bbc7a083f1b
|
18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9
|
/hun59.py
|
baa5b5f1fb712f457c9f7d03882e9ee6df6b936d
|
[] |
no_license
|
mahakalai/mahak
|
05f96d52880ed7b2e5eb70dd1dbf14fc533236e8
|
613be9df7743ef59b1f0e07b7df987d29bb23ec7
|
refs/heads/master
| 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
n=int(input())
l=[int(x) for x in input().split()]
l2=[int(x) for x in input().split()]
c=[]
for i in range(len(l)):
s=l[i]+l2[i]
c.append(s)
print(*c)
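# Added sketch: the same elementwise sum as a one-liner with zip:
#     print(*[a + b for a, b in zip(l, l2)])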
|
[
"[email protected]"
] | |
ecfed22a55a201c3f82849c3350765d7d6ff4eba
|
881041fab1b4d05f1c5371efed2f9276037eb609
|
/tasks/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/depositor.py
|
9a7c26eb798400b152d2480a759e5404a12e4edc
|
[] |
no_license
|
ResidentMario/urban-physiology-nyc-catalog
|
b568f3b6ee1a887a50c4df23c488f50c92e30625
|
cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c
|
refs/heads/master
| 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 496 |
py
|
import requests
r = requests.get("https://data.cityofnewyork.us/api/views/jhq9-vaec/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/data.csv"]
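# Added note (hedged): for large downloads, requests can stream to disk
# instead of buffering the whole body in memory (url/path stand in for the
# literal values above):
#     with requests.get(url, stream=True) as r:
#         with open(path, "wb") as f:
#             for chunk in r.iter_content(chunk_size=1 << 16):
#                 f.write(chunk)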
|
[
"[email protected]"
] | |
dc310ccf36c09943bcf439e31b5b6381c1913d7e
|
52ab2da7b131643a344ee5344d8f35aebd6e2eed
|
/WebProject1/myvenv/lib/python3.6/site-packages/sqlalchemy/sql/__init__.py
|
8968d2993b0f5da270970be936ef2021846d8b59
|
[
"MIT"
] |
permissive
|
ucsb-cs48-w19/5pm-findtheroommate
|
cd6db6c4cf3ee6f159b04456ba13b1ef684c7546
|
d9d01b95c478e7493b5b32c8b56ceed00578b188
|
refs/heads/master
| 2020-04-16T01:00:16.617610 | 2019-03-19T20:42:38 | 2019-03-19T20:42:38 | 165,158,037 | 2 | 1 |
MIT
| 2019-03-05T00:46:12 | 2019-01-11T01:28:11 |
Python
|
UTF-8
|
Python
| false | false | 3,753 |
py
|
# sql/__init__.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .expression import Alias # noqa
from .expression import alias # noqa
from .expression import all_ # noqa
from .expression import and_ # noqa
from .expression import any_ # noqa
from .expression import asc # noqa
from .expression import between # noqa
from .expression import bindparam # noqa
from .expression import case # noqa
from .expression import cast # noqa
from .expression import ClauseElement # noqa
from .expression import collate # noqa
from .expression import column # noqa
from .expression import ColumnCollection # noqa
from .expression import ColumnElement # noqa
from .expression import CompoundSelect # noqa
from .expression import Delete # noqa
from .expression import delete # noqa
from .expression import desc # noqa
from .expression import distinct # noqa
from .expression import except_ # noqa
from .expression import except_all # noqa
from .expression import exists # noqa
from .expression import extract # noqa
from .expression import false # noqa
from .expression import False_ # noqa
from .expression import FromClause # noqa
from .expression import func # noqa
from .expression import funcfilter # noqa
from .expression import Insert # noqa
from .expression import insert # noqa
from .expression import intersect # noqa
from .expression import intersect_all # noqa
from .expression import Join # noqa
from .expression import join # noqa
from .expression import label # noqa
from .expression import lateral # noqa
from .expression import literal # noqa
from .expression import literal_column # noqa
from .expression import modifier # noqa
from .expression import not_ # noqa
from .expression import null # noqa
from .expression import nullsfirst # noqa
from .expression import nullslast # noqa
from .expression import or_ # noqa
from .expression import outerjoin # noqa
from .expression import outparam # noqa
from .expression import over # noqa
from .expression import quoted_name # noqa
from .expression import Select # noqa
from .expression import select # noqa
from .expression import Selectable # noqa
from .expression import subquery # noqa
from .expression import table # noqa
from .expression import TableClause # noqa
from .expression import TableSample # noqa
from .expression import tablesample # noqa
from .expression import text # noqa
from .expression import true # noqa
from .expression import True_ # noqa
from .expression import tuple_ # noqa
from .expression import type_coerce # noqa
from .expression import union # noqa
from .expression import union_all # noqa
from .expression import Update # noqa
from .expression import update # noqa
from .expression import within_group # noqa
from .visitors import ClauseVisitor # noqa
def __go(lcls):
global __all__
from .. import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
from .annotation import _prepare_annotations
from .annotation import Annotated # noqa
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
_prepare_annotations(ColumnElement, AnnotatedColumnElement)
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
_sa_util.dependencies.resolve_all("sqlalchemy.sql")
from . import naming # noqa
__go(locals())
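# Usage note (added): this module only re-exports the public SQL expression
# API, e.g. (SQLAlchemy 1.x style):
#     from sqlalchemy.sql import select, table, column
#     t = table("users", column("id"), column("name"))
#     print(select([t.c.id]).where(t.c.name == "spongebob"))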
|
[
"[email protected]"
] | |
bfa7b1112caec573c39a7a869fab1368f335267a
|
14c9f543d540d318d30d7acffb636e47b5d62f0a
|
/ctrl/ctrl/session_parameters/temi/robot_axioms.py
|
3af95e75a3eae08a9f59e0ac3f2ffa37f12f4be8
|
[] |
no_license
|
Wisc-HCI/Figaro
|
cecd71d0f179bcfe413b657e9a8dc02be015eff6
|
20ae549dc53064d3d4f203e623e71220a3cde1e7
|
refs/heads/master
| 2023-04-27T11:40:02.969537 | 2021-05-19T16:26:12 | 2021-05-19T16:26:12 | 358,723,686 | 0 | 1 | null | 2021-05-17T20:54:53 | 2021-04-16T21:29:01 |
Python
|
UTF-8
|
Python
| false | false | 3,917 |
py
|
class RobotAxioms:
def __init__(self):
pass
def ensure_position_and_movement_overlap(self,moments):
        # if movement is currently True and position is SOMETHING, and (1) the next movement is False and (2) the next position is ["X"], then add "movement" to the next position
for i in range(len(moments)-1):
curr_moment = moments[i]
next_moment = moments[i+1]
if curr_moment.tracks["movement"] == ["True"] and next_moment.tracks["movement"] != ["True"]:
curr_position = curr_moment.tracks["position"]
next_position = next_moment.tracks["position"]
if set(curr_position) != set(next_position):
next_moment.tracks["movement"] = ["True"]
def process_position_movement(self, moments):
for i in range(len(moments)):
moment = moments[i]
for human in ["h1"]:
if moment.tracks["position"] is not None:
for item in moment.tracks["position"]:
if human in item:
moment.tracks["close_to_human"] = True
moment.tracks["position"].remove(item)
# combine robot position and movement
if moment.tracks["movement"] == ["True"]:
if moment.tracks["position"] is None:
moment.tracks["position"] = ["movement"]
else:
moment.tracks["position"].append("movement")
'''
# look ahead to see if the next position is not movement
if i < len(moments) - 1:
if moments[i+1].tracks["movement"] is None:
lookahead_position = moments[i+1].tracks["position"]
if lookahead_position is not None:
for pos in lookahead_position:
#Discard position from human (it is redundant info)
detected_human_position = False
for human in ["h1"]:
if human in pos:
detected_human_position = True
if not detected_human_position:
moment.tracks["position"].append(pos)
'''
# combine human position and movement
# TODO: remove this
for human in ["h1"]:
if moment.tracks["{}_position".format(human)] is not None and any("robot" in string for string in moment.tracks["{}_position".format(human)]):
moment.tracks["{}_near_rob".format(human)] = True
moment.tracks["{}_movement".format(human)] = None
moment.tracks["{}_position".format(human)] = None
def remove_unrecognizable_objects_or_regions(self, moments, objects, regions):
# objects and regions are lists of tuples at the moment containing both name and coordinate data
# must extract only the name
'''
obj_name_list = []
for obj in objects:
obj_name_list.append(obj[0])
print(obj_name_list)
exit()
'''
####################################
for moment in moments:
if moment.tracks["position"] is not None:
to_remove = []
for pos in moment.tracks["position"]:
#print("considering {}".format(pos))
if pos in objects:
#print("removing {}".format(pos))
to_remove.append(pos)
for pos in to_remove:
moment.tracks["position"].remove(pos)
def axiom_only_final_movement_destination_matters(self,moments):
movement_started = False
movement_moments = []
for moment in moments:
if not movement_started and moment.tracks["movement"] == ["True"]:
movement_started = True
movement_moments.append(moment)
elif movement_started and moment.tracks["movement"] != ["True"]:
movement_started = False
# process movement moments
movement_moments.reverse()
init_pos = movement_moments[0].tracks["position"]
for mv in movement_moments:
if mv.tracks["position"] != init_pos:
if mv.tracks["position"] is not None:
to_remove = []
for item in mv.tracks["position"]:
if "h1" not in item:
to_remove.append(item)
for item in to_remove:
mv.tracks["position"].remove(item)
if len(mv.tracks["position"]) == 0:
mv.tracks["position"] = None
movement_moments = []
elif movement_started:
movement_moments.append(moment)
|
[
"[email protected]"
] | |
3a5ced59755c9071387459bf7015cf54c2e95827
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnvoila.py
|
dfe94dfc59bad8caffd2aabc270d1f9a193b666d
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 102 |
py
|
ii = [('CarlTFR.py', 6), ('IrviWVD.py', 1), ('MedwTAI2.py', 2), ('MackCNH2.py', 1), ('RogeSIP.py', 1)]
|
[
"[email protected]"
] | |
42a5eabd43a28f32e8d007c07ca5aae29e454e35
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/1165/codes/1756_1576.py
|
83c8a894e2fa689875e700b54fc225283acbf3c8
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 587 |
py
|
from numpy import *
# Eusapia's sequence of plays
v1 = array(eval(input("Enter the sequence: ")))
# Barsanulfo's sequence of plays
v2 = array(eval(input("Enter the sequence: ")))
i = 0
ve = 0
vb = 0
while(i < size(v1)):
if(((v1[i]== 11) and (v2[i]==33)) or ((v1[i]==22) and (v2[i]==11)) or ((v1[i]==33) and (v2[i]==22))):
ve = ve + 1
elif(((v2[i]==11) and (v1[i]==33)) or ((v2[i]==22) and (v1[i]==11)) or ((v2[i]==33) and (v1[i]==22))):
vb = vb + 1
i = i + 1
print(i)
if(ve > vb):
print("EUSAPIA")
elif(ve < vb):
print("BARSANULFO")
elif(ve == vb):
print("EMPATE")
|
[
"[email protected]"
] | |
03901fefe164445132e86bddd2aa7e411d6aeea7
|
9124e66c8ec04e61537473437a92b53daa32ce20
|
/rojas/app19.py
|
65d6e760247d6d6473079299176ccd573657bac7
|
[] |
no_license
|
erick1984linares/t10_linares_rojas
|
28618baccb3472fb8d48b34f5d1107b702c399d0
|
ba9462b3b881dbd3665907a7a33c4c7d80aa4251
|
refs/heads/master
| 2020-12-04T06:38:06.929626 | 2020-01-10T11:52:29 | 2020-01-10T11:52:29 | 231,661,040 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
from Rojas import libreria
oppcn = 0
limit = 3
while (oppcn != limit):
    print("########################")
    print("#         MENU         #")
    print("########################")
    print("# 1. Add a note        #")
    print("# 2. View notes        #")
    print("# 3. Exit              #")
    print("########################")
    oppcn = libreria.pedir_numero("Enter the desired option: ", 1, 3)
    if (oppcn == 1):
        pass  # add-note branch (left unimplemented in the original)
    elif (oppcn == 2):
        pass  # view-notes branch (left unimplemented in the original)
print("end of program")
|
[
"[email protected]"
] | |
5856fc7d02728c47ba6b6baf94230d6063b3e862
|
55c64c7a78c5f78f35f84ae1fce60d387ba11040
|
/bc4py/chain/workhash.py
|
9d2a80975c3daa597969793d516bf0fa1df853e0
|
[
"MIT"
] |
permissive
|
kmn/bc4py
|
df580de46456fed860e7fc0a812f5b46e04483c3
|
8f9ee09ed1325faad8c361a9b6c1b5abbc93cbc1
|
refs/heads/master
| 2020-04-13T06:04:32.273534 | 2018-12-18T02:48:41 | 2018-12-18T02:48:41 | 163,010,903 | 0 | 0 | null | 2018-12-24T17:16:21 | 2018-12-24T17:16:21 | null |
UTF-8
|
Python
| false | false | 7,130 |
py
|
from bc4py.config import C, BlockChainError
from multiprocessing import get_context, current_process
import threading
import logging
from os import urandom
from time import time, sleep
from yespower import hash as yespower_hash # for CPU
from x11_hash import getPoWHash as x11_hash # for ASIC
from hmq_hash import getPoWHash as hmq_hash # for GPU
from litecoin_scrypt import getPoWHash as ltc_hash # for ASIC
from shield_x16s_hash import getPoWHash as x16s_hash # for GPU
from pooled_multiprocessing import cpu_num
mp_generator = list()
mp_lock = threading.Lock()
def self_check_hash_fnc():
check_hash = b'\x00' * 80
check_list = [
(yespower_hash, b'z\x1b\xde\x0c\x01\xec\xc1\xd3\xdf\x86{\xb2;\x97>\xee\xbc\x96\xfd'
b'\x83[\x14sv\xca\xe9\xf9\xa7\x04t\xe0F'),
(x11_hash, b'\x83(\x84a\x80\x96[\xceV\xf6\x1e\x01]\xb6*\xf5b\xa6\x11\xd8^^r\x1d\x85L\x8d\x97\xe4z>\xa3'),
(hmq_hash, b'\xf9\xf2~\xbc\x96=\xe0\xed\xff\xd0\xd3&\xe5\xab&\xea\xe1\xec'
b'\x0f\x031\n\xdf\x12\xf1b zT\xeb\xd6\x86'),
(ltc_hash, b'\x16\x1d\x08v\xf3\xb9;\x10H\xcd\xa1\xbd\xea\xa73.\xe2\x10\xf7'
b'\x13\x1bB\x01<\xb49\x13\xa6U:Ki'),
(x16s_hash, b'\xcc\xa6\x1bVE\xd4\xcez3\x9b\xbf\xba\x80\x05\xeb\xd3\xa5\x86\x9bW'
b'\x01\xf8\xb6\xe5a\xc3\x9e\xd9\x8c\xca\x02\x1a')]
for hash_fnc, correct_hash in check_list:
if hash_fnc(check_hash) != correct_hash:
raise Exception('self check failed, hash module "{}".'.format(hash_fnc.__module__))
def get_workhash_fnc(flag):
if flag == C.BLOCK_YES_POW:
return yespower_hash
elif flag == C.BLOCK_X11_POW:
return x11_hash
elif flag == C.BLOCK_HMQ_POW:
return hmq_hash
elif flag == C.BLOCK_LTC_POW:
return ltc_hash
elif flag == C.BLOCK_X16R_POW:
return x16s_hash
elif flag in C.consensus2name:
raise Exception('Not found block flag {}'.format(C.consensus2name[flag]))
else:
raise Exception('Not found block flag {}?'.format(flag))
def update_work_hash(block):
if block.flag == C.BLOCK_GENESIS:
block.work_hash = b'\xff' * 32
elif block.flag == C.BLOCK_POS:
proof_tx = block.txs[0]
if proof_tx.pos_amount is None:
from bc4py.database.builder import tx_builder
txhash, txindex = proof_tx.inputs[0]
output_tx = tx_builder.get_tx(txhash)
if output_tx is None:
raise BlockChainError('Not found output {} of {}'.format(proof_tx, block))
address, coin_id, amount = output_tx.outputs[txindex]
proof_tx.pos_amount = amount
block.work_hash = proof_tx.get_pos_hash(block.previous_hash)
else:
# POW_???
hash_fnc = get_workhash_fnc(block.flag)
block.work_hash = hash_fnc(block.b)
def generate_many_hash(block, how_many):
assert block.flag != C.BLOCK_POS and block.flag != C.BLOCK_GENESIS
assert how_many > 0
# hash generating with multi-core
start = time()
with mp_lock:
f_wait = False
while True:
free_process = list()
for hash_generator in mp_generator:
if not hash_generator.lock.locked():
free_process.append(hash_generator)
if len(free_process) > 0:
break
else:
f_wait = True
sleep(0.05)
if f_wait:
logging.debug("Wait for free_process for mining... {}mSec"
.format(int((time()-start)*1000)))
request_num = how_many // len(free_process)
# throw task
for hash_generator in free_process:
hash_generator.generate(block, request_num)
block_b = None
work_hash = None
work_hash_int = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
for hash_generator in free_process:
tmp_block_b, check_hash = hash_generator.result()
check_int = int.from_bytes(check_hash, 'little')
if check_int < work_hash_int:
block_b = tmp_block_b
work_hash = check_hash
work_hash_int = check_int
block.b = block_b
block.work_hash = work_hash
block.deserialize()
return time() - start
def start_work_hash(process=None):
if current_process().name != 'MainProcess':
raise Exception('Is not main process!')
if len(mp_generator) != 0:
raise Exception('Already mp_generator is filled.')
if process is None:
process = cpu_num
for index in range(1, process + 1):
# Want to use 1 core for main-thread
hash_generator = HashGenerator(index=index)
hash_generator.start()
mp_generator.append(hash_generator)
def close_work_hash():
for hash_generator in mp_generator:
hash_generator.close()
mp_generator.clear()
logging.debug("Close hashing process.")
def _pow_generator(pipe):
binary = None
while True:
try:
binary, block_flag, how_many = pipe.recv()
hash_fnc = get_workhash_fnc(block_flag)
hashed = hash_fnc(binary)
minimum_num = int.from_bytes(hashed, 'little')
new_binary = binary
for i in range(how_many):
new_binary = new_binary[:-4] + urandom(4)
new_hash = hash_fnc(new_binary)
new_num = int.from_bytes(new_hash, 'little')
if minimum_num > new_num:
binary = new_binary
hashed = new_hash
minimum_num = new_num
pipe.send((binary, hashed))
except Exception as e:
msg = "Hashing failed {} by \"{}\"".format(binary, e)
try:
pipe.send(msg)
except Exception as e:
logging.info("Close by pipe error, {}".format(e))
return
class HashGenerator:
def __init__(self, index):
self.index = index
cxt = get_context('spawn')
parent_conn, child_conn = cxt.Pipe(duplex=True)
self.process = cxt.Process(
target=_pow_generator, name="Hashing{}".format(index), args=(child_conn,))
self.process.daemon = True
self.parent_conn = parent_conn
self.lock = threading.Lock()
def start(self):
self.process.start()
logging.info("Start work hash gene {}".format(self.index))
def close(self):
if self.process.is_alive():
self.process.terminate()
self.parent_conn.close()
def generate(self, block, how_many):
self.lock.acquire()
self.parent_conn.send((block.b, block.flag, how_many))
def result(self):
data = self.parent_conn.recv()
self.lock.release()
if isinstance(data, tuple):
return data
else:
raise BlockChainError('Unknown status on pipe {}'.format(data))
self_check_hash_fnc()
__all__ = [
"get_workhash_fnc",
"start_work_hash",
"update_work_hash",
"generate_many_hash",
"close_work_hash"
]
|
[
"[email protected]"
] | |
8c1eb68dd152c81f80eb1ecdfc66c0b3e21895bd
|
cc6f0e69703e8900c99cd0c15d1df10e81d24909
|
/PiGlassBetaTesting.py
|
db69570a289dfaabd0f0499a9a8d3dadd3c2f948
|
[
"MIT"
] |
permissive
|
iCodeIN/piglass
|
789c72a6f68542c4954ce0185f40f31f5da2fffc
|
eaf84d4620908848e3b9e835f027d450153ab37c
|
refs/heads/master
| 2022-04-16T13:41:24.489525 | 2018-05-16T19:01:43 | 2018-05-16T19:01:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,361 |
py
|
from subprocess import call
import RPi.GPIO as GPIO
import picamera
import time
import sys
import datetime
import cv2
import numpy as np
import KeyboardPoller
import subprocess
import thread
import re
height = 600
width = 800
alphaValue = 75
o = None
recording = 0
buttoncounter = 0
camera = picamera.PiCamera()
global videoFile
global zoomcount
zoomcount=0
globalCounter = 0
global roi
roi = 0
def initialize_camera():
    global width, height, ovl
camera.resolution = (width, height)
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 50
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = True
camera.exposure_compensation = 0
camera.exposure_mode = 'auto'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = -90
camera.hflip = False
camera.vflip = False
camera.start_preview()
print "Camera is configured and outputting video..."
if (width%32) > 0 or (height%16) > 0:
print "Rounding down set resolution to match camera block size:"
width = width-(width%32)
height = height-(height%16)
print "New resolution: " + str(width) + "x" + str(height)
ovl = np.zeros((height, width, 3), dtype=np.uint8)
globalz = {
'zoom_step' : 0.03,
'zoom_xy_min' : 0.0,
'zoom_xy' : 0.0,
'zoom_xy_max' : 0.4,
'zoom_wh_min' : 1.0,
'zoom_wh' : 1.0,
'zoom_wh_max' : 0.2,
}
def update_zoom():
global roi
#print roi
#print str(roi)[1:-1]
roi = str(globalz['zoom_xy'])[:6], str(globalz['zoom_xy'])[:6], str(globalz['zoom_wh'])[:6], str(globalz['zoom_wh'])[:6]
print roi
camera.zoom = (globalz['zoom_xy'], globalz['zoom_xy'], globalz['zoom_wh'], globalz['zoom_wh'])
print "Camera at (x, y, w, h) = ", camera.zoom
def set_min_zoom():
globalz['zoom_xy'] = globalz['zoom_xy_min']
globalz['zoom_wh'] = globalz['zoom_wh_min']
def set_max_zoom():
globalz['zoom_xy'] = globalz['zoom_xy_max']
globalz['zoom_wh'] = globalz['zoom_wh_max']
def zoom_out():
global zoomcount
if globalz['zoom_xy'] - globalz['zoom_step'] < globalz['zoom_xy_min']:
set_min_zoom()
else:
globalz['zoom_xy'] -= globalz['zoom_step']
globalz['zoom_wh'] += (globalz['zoom_step'] * 2)
zoomcount = zoomcount - 1
update_zoom()
def zoom_in():
global zoomcount
if globalz['zoom_xy'] + globalz['zoom_step'] > globalz['zoom_xy_max']:
set_max_zoom()
else:
zoomcount = zoomcount + 1
globalz['zoom_xy'] += globalz['zoom_step']
globalz['zoom_wh'] -= (globalz['zoom_step'] * 2)
update_zoom()
ovl = np.zeros((height, width, 3), dtype=np.uint8)
# initial config for gpio ports
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
colors = {
'white': (255,255,255),
'red': (255,0,0),
'green': (0,255,0),
'blue': (0,0,255),
'yellow': (255,255,0),
}
def colormap(col):
return colors.get(col, (255,255,255))
col = colormap('white')
font = cv2.FONT_HERSHEY_PLAIN
guivisible = 1
togsw = 1
guiOn = 1
gui = np.zeros((height, width, 3), dtype=np.uint8)
gui1 = 'PiGlass'
gui2 = 'Version 0.5 alpha'
gui3 = 'P Key = take pic'
gui4 = 'V Key = take video'
gui5 = ' '
def get_file_name_pic(): # new
return datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S.jpg")
def get_file_name_vid(): # new
return datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S.h264")
def creategui(target):
global gui5
cv2.putText(target, gui1, (10,height-160), font, 10, col, 6)
cv2.putText(target, gui2, (10,height-130), font, 3, col, 3)
cv2.putText(target, gui3, (10,height-90), font, 3, col, 3)
cv2.putText(target, gui4, (10,height-50), font, 3, col, 3)
cv2.putText(target, gui5, (10,height-10), font, 3, colormap("green"), 3)
return
def patternswitch(target,guitoggle):
global o, alphaValue
toggleonoff()
if guitoggle == 1:
creategui(gui)
o = camera.add_overlay(np.getbuffer(target), layer=3, alpha=alphaValue)
return
def patternswitcherRecord(target,guitoggle):
global o, zoomcount, ycenter
if guitoggle == 1:
creategui(gui)
# function
def togglepatternRecord():
global togsw,o,curpat,col,ovl,gui,alphaValue,ycenter,zoomcount
# if overlay is inactive, ignore button:
if togsw == 0:
print "Pattern button pressed, but ignored --- Crosshair not visible."
else:
if guivisible == 0:
ovl = np.zeros((height, width, 3), dtype=np.uint8)
patternswitcherRecord(ovl,0)
else:
gui = np.zeros((height, width, 3), dtype=np.uint8)
creategui(gui)
patternswitcherRecord(gui,1)
return
def togglepattern():
global togsw,o,ovl,gui,alphaValue
# if overlay is inactive, ignore button:
if togsw == 0:
print "Pattern button pressed, but ignored --- Crosshair not visible."
# if overlay is active, drop it, change pattern, then show it again
else:
if guivisible == 0:
# reinitialize array:
ovl = np.zeros((height, width, 3), dtype=np.uint8)
patternswitch(ovl,0)
if o != None:
camera.remove_overlay(o)
o = camera.add_overlay(np.getbuffer(ovl), layer=3, alpha=alphaValue)
else:
# reinitialize array
gui = np.zeros((height, width, 3), dtype=np.uint8)
creategui(gui)
patternswitch(gui,1)
if o != None:
camera.remove_overlay(o)
o = camera.add_overlay(np.getbuffer(gui), layer=3, alpha=alphaValue)
return
def toggleonoff():
global togsw,o,alphaValue
if togsw == 1:
print "Toggle Crosshair OFF"
if o != None:
camera.remove_overlay(o)
togsw = 0
else:
print "Toggle Crosshair ON"
if guivisible == 0:
o = camera.add_overlay(np.getbuffer(ovl), layer=3, alpha=alphaValue)
else:
o = camera.add_overlay(np.getbuffer(gui), layer=3, alpha=alphaValue)
togsw = 1
return
# function
def togglepatternZoomIn():
global togsw,o,curpat,col,ovl,gui,alphaValue,ycenter,zoomcount
# if overlay is inactive, ignore button:
if togsw == 0:
print "Pattern button pressed, but ignored --- Crosshair not visible."
zoom_in()
else:
if guivisible == 0:
zoom_in()
# reinitialize array:
ovl = np.zeros((height, width, 3), dtype=np.uint8)
patternswitcherZoomIn(ovl,0)
else:
# reinitialize array
zoom_in()
gui = np.zeros((height, width, 3), dtype=np.uint8)
creategui(gui)
patternswitcherZoomIn(gui,1)
return
def togglepatternZoomOut():
global togsw,o,curpat,col,ovl,gui,alphaValue
# if overlay is inactive, ignore button:
if togsw == 0:
zoom_out()
else:
if guivisible == 0:
zoom_out()
# reinitialize array:
ovl = np.zeros((height, width, 3), dtype=np.uint8)
patternswitcherZoomOut(ovl,0)
o = camera.add_overlay(np.getbuffer(ovl), layer=3, alpha=alphaValue)
else:
zoom_out()
# reinitialize array
gui = np.zeros((height, width, 3), dtype=np.uint8)
creategui(gui)
patternswitcherZoomOut(gui,1)
o = camera.add_overlay(np.getbuffer(gui), layer=3, alpha=alphaValue)
return
def patternswitcherZoomIn(target,guitoggle):
global o, zoomcount, ycenter
if guitoggle == 1:
creategui(gui)
if globalz['zoom_xy'] == globalz['zoom_xy_max']:
print("zoom at max")
def patternswitcherZoomOut(target,guitoggle):
global o, zoomcount, ycenter
# first remove existing overlay:
if o != None:
camera.remove_overlay(o)
if guitoggle == 1:
creategui(gui)
if globalz['zoom_xy'] == globalz['zoom_xy_min']:
print("zoom at min")
def main():
    global buttoncounter, zoomcount, guiOn, recording, gui5, gui, o, ovl, camera, videoFile
try:
initialize_camera()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
zoom_in()
patternswitch(gui,1)
guivisible = 1
while True:
if KeyboardPoller.keypressed.isSet():
if KeyboardPoller.key=="z":
togglepatternZoomIn()
if KeyboardPoller.key=="x":
togglepatternZoomOut()
if KeyboardPoller.key=="i":
loopcount = 14 - zoomcount
for x in range(loopcount):
togglepatternZoomIn()
if KeyboardPoller.key=="o":
loopcount = zoomcount + 1
for x in range(loopcount):
togglepatternZoomOut()
if KeyboardPoller.key=="n":
set_min_zoom()
update_zoom()
for x in range(14):
zoom_in()
if KeyboardPoller.key=="p":
global roi
filename = get_file_name_pic()
#pushNotification = "curl --data 'key=XXXXXX&title=Photo Taken&msg='"+filename+" https://api.simplepush.io/send"
print camera.zoom
camera.close()
o = None
roi = str(roi)[1:-1]
roi = re.sub("'","",roi)
roi = re.sub(" ","",roi)
print roi
photo = "raspistill -roi "+roi+" -br 55 -ex auto -o /home/pi/piglass/"+filename+" -rot 270"
subprocess.Popen(photo, shell=True)
time.sleep(1)
photofile = "/home/pi/Dropbox-Uploader/dropbox_uploader.sh upload "+filename+" "+filename
time.sleep(6)
camera = picamera.PiCamera()
subprocess.Popen(photofile, shell=True)
#subprocess.Popen(pushNotification, shell=True)
initialize_camera()
camera.start_preview()
update_zoom()
patternswitch(gui, 1)
gui5 = "uploading"
togglepatternRecord()
toggleonoff()
toggleonoff()
time.sleep(1)
gui5 = ""
togglepatternRecord()
toggleonoff()
toggleonoff()
if KeyboardPoller.key=="v":
if recording == 0:
print("recording")
videoFile = get_file_name_vid()
camera.close()
o = None
vid = "raspivid -t 0 -o /home/pi/piglass/"+videoFile+" -rot 270"
subprocess.Popen(vid, shell=True)
recording = 1
if KeyboardPoller.key=="b":
recording = 0
o = None
kill = "killall raspivid"
subprocess.Popen(kill, shell=True)
#pushNotification = "curl --data 'key=XXXXXX&title=Video Taken&msg='"+videoFile+" https://api.simplepush.io/send"
#subprocess.Popen(pushNotification, shell=True)
#time.sleep(2)
vidfile = "/home/pi/Dropbox-Uploader/dropbox_uploader.sh upload "+videoFile+" "+videoFile
subprocess.Popen(vidfile, shell=True)
camera = picamera.PiCamera()
initialize_camera()
camera.start_preview()
patternswitch(gui, 1)
gui5 = "uploaded"
togglepatternRecord()
toggleonoff()
toggleonoff()
time.sleep(1)
gui5 = ""
togglepatternRecord()
toggleonoff()
toggleonoff()
if KeyboardPoller.key=="t":
toggleonoff()
KeyboardPoller.WaitKey().thread.start()
finally:
camera.close() # clean up camera
GPIO.cleanup() # clean up GPIO
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
2f41fce2486a6fd898fa969a55cd13b94650392e
|
882c865cf0a4b94fdd117affbb5748bdf4e056d0
|
/python/BOJ/08_DP/1915_가장큰정사각형.py
|
553f36b68765d60dd1500395a58dfec90db8a5c6
|
[] |
no_license
|
minhee0327/Algorithm
|
ebae861e90069e2d9cf0680159e14c833b2f0da3
|
fb0d3763b1b75d310de4c19c77014e8fb86dad0d
|
refs/heads/master
| 2023-08-15T14:55:49.769179 | 2021-09-14T04:05:11 | 2021-09-14T04:05:11 | 331,007,037 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 447 |
py
|
n, m = map(int, input().split())
# grid and DP table with a 1-cell sentinel border of zeros (row/col 0)
arr = [[0 for _ in range(m+1)] for i in range(n+1)]
DP = [[0 for _ in range(m+1)] for i in range(n+1)]
ans = 0
for i in range(n):
    for idx, j in enumerate(list(map(int, list(input())))):
        arr[i+1][idx+1] = j
# DP[i][j] = side of the largest all-ones square whose bottom-right corner
# is (i, j); it extends the three neighboring squares by one cell
for i in range(1, n+1):
    for j in range(1, m+1):
        if arr[i][j]:
            DP[i][j] = min(DP[i-1][j], DP[i][j-1], DP[i-1][j-1])+1
            ans = max(ans, DP[i][j])
print(ans**2)  # area = side squared
|
[
"[email protected]"
] | |
a1ef0772f64128312a6b795eea2b2e63a7a6c374
|
e8a48749014f372633de65d79bfa26a3ad743d89
|
/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
|
bc8663587d96d8a070a3832872ff53a64827a6f1
|
[
"Apache-2.0"
] |
permissive
|
pvcastro/pytorch-pretrained-BERT
|
183b7291972c8d8c66c995647df66c1fe439a763
|
49cd736a288a315d741e5c337790effa4c9fa689
|
refs/heads/master
| 2022-08-19T08:55:16.332585 | 2022-06-30T16:11:08 | 2022-06-30T16:11:08 | 168,367,637 | 1 | 0 |
Apache-2.0
| 2019-01-30T15:39:42 | 2019-01-30T15:39:41 | null |
UTF-8
|
Python
| false | false | 18,400 |
py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" UniSpeechSat model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. It is used to instantiate a
    UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the UniSpeechSat
[microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`UniSpeechSatModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`UniSpeechSatForCTC`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for quantized feature encoder states.
conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
            actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
            mask_time_min_masks''
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment
            is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespective of `mask_feature_prob`. Only relevant if
            ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization.
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
        feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder that's used by the quantizer.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`UniSpeechSatForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`UniSpeechSatForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`UniSpeechSatForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
Example:
```python
>>> from transformers import UniSpeechSatModel, UniSpeechSatConfig
>>> # Initializing a UniSpeechSat microsoft/unispeech-sat-base-100h-libri-ft style configuration
>>> configuration = UniSpeechSatConfig()
>>> # Initializing a model from the microsoft/unispeech-sat-base-100h-libri-ft style configuration
>>> model = UniSpeechSatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "unispeech-sat"
def __init__(
self,
vocab_size=32,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
feat_quantizer_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
do_stable_layer_norm=False,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="mean",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
num_clusters=504,
**kwargs
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.num_clusters = num_clusters
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
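# A minimal sanity-check sketch (not part of the library itself): with the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples
# the raw waveform by 5 * 2**6 = 320, which is what `inputs_to_logits_ratio`
# reports.
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    assert config.inputs_to_logits_ratio == 320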
|
[
"[email protected]"
] | |
9e81fd44c79e908e7b9db8f27e74f9b6311874d5
|
2c7de112498f65c7b6a74888490266ec6c772e47
|
/PDSim/core/journal_bearing.py
|
04b5ebc907b447af1da97d9e2d092cf83768c42a
|
[] |
no_license
|
bansal16/pdsim
|
16c83dfc1af9816c369e07b6ef50e74658359c22
|
9098086c558d6b23c25d1b9e45ea86186905f41a
|
refs/heads/master
| 2020-12-31T01:48:08.672061 | 2015-04-08T02:14:09 | 2015-04-08T02:14:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,225 |
py
|
from __future__ import division, print_function
import numpy as np
from math import pi, atan
import scipy.optimize
import matplotlib.pyplot as plt
N = 61
#e_mat=[0.2,0.25,0.3,0.35,0.4,0.5,0.6,0.7,0.8,0.9];
phi_star = pi
def TwoDGriddedIntegrate(I,N):
# Average the center of each cell based on its neighboring nodes
return np.sum(np.sum((I[0:N-1,0:N-1]+I[1:N,0:N-1]+I[0:N-1,1:N]+I[1:N,1:N])))/4
def TwoDGriddedIntegrate2(PHI,Y,I):
#Integrate along phi direction for each y, then do a trapezoidal integration of each of the y
plt.plot(Y[1,:],np.trapz(I,PHI,axis = 0))
plt.show()
return np.trapz(np.trapz(I,PHI,axis = 0),Y[1,:])
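# Quick sanity check for the cell-averaged integrator above (a sketch, not part
# of the original analysis): integrating f = 1 over the unit square must give 1
# once the (N-1)**2 cell averages are scaled by the cell area.
def _test_gridded_integrate(n=11):
    dx = dy = 1.0/(n - 1)
    total = dx*dy*TwoDGriddedIntegrate(np.ones((n, n)), n)
    assert abs(total - 1.0) < 1e-12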
def OBJECTIVE(phi_star, epsilon, plot = False, output = False):
PHI = np.tile(np.linspace(0,phi_star,N).T,(N,1)).T
Y = np.tile(np.linspace(0,1,N),(N,1))
dPHI = phi_star/(N-1)
dY = 1/(N-1)
sinPHI=np.sin(PHI)
P = 0*PHI
Pnew = 0*PHI
f = 0*PHI
df = 0*PHI
_lambda = 1
change = 999
eps=1e-6;
count=0;
while (change>eps):
#Calculate geometric parameters
H=1+epsilon*np.cos(PHI);
H3=H**3;
#Coefficients
A=H3[2:N,1:N-1]
B=H3[0:N-2,1:N-1]
C=H3[1:N-1,1:N-1]
#Calculate residuals
f[1:N-1,1:N-1] = -(4*A+4*B+2*_lambda*dPHI**2/dY**2*C)*P[1:N-1,1:N-1]+(3*A+B)*P[2:N,1:N-1]+(A+3*B)*P[0:N-2,1:N-1]+(_lambda**2*dPHI**2/dY**2*C)*(P[1:N-1,2:N]+P[1:N-1,0:N-2])+24*dPHI**2*epsilon*sinPHI[1:N-1,1:N-1]
#Calculate derivative
df[1:N-1,1:N-1]=-(4*A+4*B+2*_lambda*dPHI**2/dY**2*C);
#Evaluate P_new=P_old-f/dfdP
P[1:N-1,1:N-1]=P[1:N-1,1:N-1]-f[1:N-1,1:N-1]/df[1:N-1,1:N-1];
#Evaluate change
change=np.max(np.max(np.abs(f[1:N-1,1:N-1]/df[1:N-1,1:N-1])));
        if count % 1000 == 0:
            print(change)
        count += 1
if output:
Wx=dY*dPHI*np.sum(np.sum(np.sin(PHI)*P))
Wz=-dY*dPHI*np.sum(np.sum(np.cos(PHI)*P))
Wr = np.sqrt(Wx**2+Wz**2)
PHI_angle = atan(Wx/Wz)
B_j = 1/(pi*Wr)
DPDPHI = 0*Y
        DPDPHI[0:N-2,0:N] = (P[1:N-1,0:N]-P[0:N-2,0:N])/(dPHI)
        # backward differences for the last two rows; the original slice
        # DPDPHI[N-1:N-1] was empty and left them at zero (presumed typo)
        DPDPHI[N-2:N,0:N] = (P[N-2:N,0:N]-P[N-3:N-1,0:N])/(dPHI)
integrand = 1/H
#integrand = H/2*DPDPHI+1/H
Fb1 = dPHI*dY*np.sum(np.sum(integrand))
Fb2 = dPHI*dY*TwoDGriddedIntegrate(integrand,N)
Fb3 = TwoDGriddedIntegrate2(PHI,Y,integrand)
mu_rb_c = Fb3/Wr # mu*r_b/c
        print('Fb1,Fb2,Fb3', Fb1, Fb2, Fb3)
        print('B_j', B_j)
        print('mu*rb/c', mu_rb_c)
        #print('mu*rb/c', mu_rb_c/12.8)
        print('PHI_angle', PHI_angle/pi*180)
plt.contour(PHI,Y,H/2*DPDPHI+1/H)
plt.show()
if plot:
plt.contour(PHI,Y,P,30)
plt.show()
return np.sum(3*P[N-1,N//2+1]-4*P[N-2,N//2+1]+P[N-3,N//2+1])/(2*dPHI)
if __name__=='__main__':
#print scipy.optimize.newton.__doc__; quit()
phi_star = scipy.optimize.newton(OBJECTIVE, pi, args = (0.6,), tol = 0.004)
OBJECTIVE(phi_star,0.6,plot = True, output = True)
|
[
"[email protected]"
] | |
146eda27bba2e7af27bc4756c5c0cd8650510af9
|
60e38d3122cfb18cf8901e0d7fba02ef2a32affa
|
/notebooks/converted_notebooks/rotate_and_crop_images.py
|
3f4def955ba5f7466af281090a2d737a65fe3d19
|
[
"BSD-3-Clause"
] |
permissive
|
earnestdl/python_notebooks
|
ac11b40d9d5e721b947b083b2f4c301079f206a8
|
4ef31711b70b90cf621e9e9d094fa2a43eeeae16
|
refs/heads/master
| 2023-03-12T19:41:44.229158 | 2021-02-22T15:41:57 | 2021-02-22T15:41:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,223 |
py
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/rotate_and_crop_images)
#
# <img src='__docs/__all/notebook_rules.png' />
# # Select Your IPTS
# +
from __code.ui_builder import UiBuilder
o_builder = UiBuilder(ui_name = 'ui_rotate_and_crop.ui')
from __code.load_images import LoadImages
from __code.rotate_and_crop_images import RotateAndCropImages, Export
from __code import system
system.System.select_working_dir()
from __code.__all import custom_style
custom_style.style()
# + run_control={"frozen": false, "read_only": false}
# %gui qt
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select and Load Working Images
# + [markdown] run_control={"frozen": false, "read_only": false}
# Select the images (tiff or fits) you want to crop and/or rotate
# + run_control={"frozen": false, "read_only": false}
o_load = LoadImages(working_dir=system.System.get_working_dir())
o_load.select_images(use_next=True)
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select crop region and/or rotation angle
# + run_control={"frozen": false, "read_only": false}
list_images = o_load.list_images
o_crop = RotateAndCropImages(o_load = o_load)
o_crop.show()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Export Images
# + run_control={"frozen": false, "read_only": false}
rotated_working_data = o_crop.rotated_working_data
rotation_angle = o_crop.rotation_angle
o_output_folder = Export(working_dir=system.System.get_working_dir(),
data=rotated_working_data,
list_files=list_images,
rotation_angle=rotation_angle)
o_output_folder.select_folder()
# + [markdown] run_control={"frozen": false, "read_only": false}
# Cleaning notebook memory
# + run_control={"frozen": false, "read_only": false}
try:
    del o_crop
    del o_load
except NameError:
    pass
# -
|
[
"[email protected]"
] | |
204a241e69f547a1d9258401f479b7a26b973865
|
c38597764dba09207302358901ff74c54d6bdd0d
|
/cmd3/plugins/pause.py
|
2db4f149972a8ac97316c00634d84086ff18aec7
|
[
"Apache-2.0"
] |
permissive
|
futuregrid/cmd3
|
29ab97d939f8c99b02a8515ce02d7e517ca6788f
|
266bf073457165679d918c238a220a3136f41ed5
|
refs/heads/master
| 2021-01-02T09:26:36.376760 | 2015-03-28T15:45:07 | 2015-03-28T15:45:07 | 8,934,650 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 383 |
py
|
from cmd3.shell import command
class pause:
def activate_pause(self):
pass
@command
def do_pause(self, arg, arguments):
"""
Usage:
pause [MESSAGE]
Displays the specified text then waits for the user to press RETURN.
Arguments:
MESSAGE message to be displayed
"""
raw_input(arg + '\n')
|
[
"[email protected]"
] | |
bbc7675acc7ac35bcfd976febc56886686fd3b6c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_317/ch19_2020_10_07_12_53_37_322833.py
|
b1a16cdb8cabc49234e4f11bf87e85323c500893
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 225 |
py
|
def classifica_triangulo(a,b,c):
    if a == b and b == c:
        return "equilátero"
    elif a != b and b != c and c != a:
        return "escaleno"
    else:
        # exactly two sides equal (the original branch a==b and b==c and c!=a was unreachable)
        return "isósceles"
|
[
"[email protected]"
] | |
f69dafe7e3cd2bba8f46924f56fc36ccaeb49bb1
|
a3d0e224a2a8487f8d271991b3cf1981b9e58791
|
/python/PoissonDistribuition/solution/PoissonDistribII.py
|
4c246a2f1086dd71d25a654cf77052b7b7aca74e
|
[] |
no_license
|
chrislucas/hackerrank-10-days-of-statistics
|
c66306f55ca7e0080cecebfed497b5032f8a0007
|
94fce754274ad706b44b06f9d6ff8d96838c80d0
|
refs/heads/master
| 2020-03-25T23:48:11.992249 | 2018-12-05T17:35:21 | 2018-12-05T17:35:21 | 144,293,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 536 |
py
|
'''
https://www.hackerrank.com/challenges/s10-poisson-distribution-2/problem
'''
from math import e as E
def factorial(n):
acc = 1
for i in range(n, 1, -1):
acc *= i
return acc
def poisson_distribution(success, avg):
return ((avg ** success) * (E ** (-avg))) / factorial(success)
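# Why run() below can use a closed form: the daily cost is C = 160 + 40*X**2
# with X ~ Poisson(a), and E[X**2] = Var(X) + E[X]**2 = a + a**2. A numeric
# sanity check of that identity with the pmf above (not required by the judge):
def second_moment(avg, terms=60):
    return sum(k * k * poisson_distribution(k, avg) for k in range(terms))
# second_moment(0.88) is close to 0.88 + 0.88**2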
# Sample input:
#   a = 0.88
#   b = 1.55
def run():
a, b = map(float, input().split(" "))
ca = 160 + 40 * (a + a * a)
cb = 128 + 40 * (b + b * b)
print("%.3f\n%.3f" % (ca, cb))
run()
if __name__ == '__main__':
pass
|
[
"[email protected]"
] | |
736fc586eeb80e5b1b5a212f088fc98d4a063837
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/python/ops/array_grad.pyi
|
0ce04ead67f2347cf2ccd72e01a31d5ad3c5b6c6
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
pyi
|
# Stubs for tensorflow.python.ops.array_grad (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python import pywrap_tensorflow as pywrap_tensorflow
from tensorflow.python.eager import context as context
from tensorflow.python.framework import constant_op as constant_op, ops as ops, sparse_tensor as sparse_tensor, tensor_util as tensor_util
from tensorflow.python.ops import array_ops as array_ops, control_flow_util as control_flow_util, gen_array_ops as gen_array_ops, math_ops as math_ops, sparse_ops as sparse_ops
|
[
"[email protected]"
] | |
5c899471a1f6911eedcac4f5185958ee38057e03
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/blis/tests/common.py
|
643d09ec1fd370778e9270a7a94dc2d2f13b6eea
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:9047f9d9f85d18d5f16a141581ac46738f83c8e1b9d1ceff33a90c06e9979143
size 2577
|
[
"[email protected]"
] | |
7a220d54cfeb7e621203b6f09cdc08c108375b43
|
5f5c6809e9e68127262c843602185f3d6d6d556b
|
/thejoker/tests/test_data.py
|
e8cf4dcbd7dfbe01bb76eec800c6f88f20866519
|
[
"MIT"
] |
permissive
|
minaskar/thejoker
|
e195bd361d4eadf051fb29380d110d214ea65a1b
|
b7ba1d094ce3d4d61c1db80da37981327f280d34
|
refs/heads/master
| 2023-03-16T02:55:04.644778 | 2020-06-15T19:39:29 | 2020-06-15T19:39:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,712 |
py
|
"""Tests for data.py and data_helpers.py"""
# Third-party
from astropy.table import Table
from astropy.time import Time
from astropy.timeseries import TimeSeries
import astropy.units as u
import numpy as np
import pytest
try:
import matplotlib.pyplot as plt
HAS_MPL = True
except ImportError:
HAS_MPL = False
try:
import fuzzywuzzy # noqa
HAS_FUZZY = True
except ImportError:
HAS_FUZZY = False
# Package
from ..data import RVData
from ..data_helpers import guess_time_format, validate_prepare_data
from ..prior import JokerPrior
def test_guess_time_format():
for yr in np.arange(1975, 2040, 5):
assert guess_time_format(Time(f'{yr}-05-23').jd) == 'jd'
assert guess_time_format(Time(f'{yr}-05-23').mjd) == 'mjd'
with pytest.raises(NotImplementedError):
guess_time_format('asdfasdf')
for bad_val in np.array([0., 1450., 2500., 5000.]):
with pytest.raises(ValueError):
guess_time_format(bad_val)
def get_valid_input(rnd=None, size=32):
if rnd is None:
rnd = np.random.default_rng(42)
t_arr = rnd.uniform(55555., 56012., size=size)
t_obj = Time(t_arr, format='mjd')
rv = 100 * np.sin(2*np.pi * t_arr / 15.) * u.km / u.s
err = rnd.uniform(0.1, 0.5, size=len(t_arr)) * u.km/u.s
cov = (np.diag(err.value) * err.unit) ** 2
_tbl = Table()
_tbl['rv'] = rnd.uniform(size=len(rv))
_tbl['rv'].unit = u.km/u.s
_tbl['rv_err'] = rnd.uniform(size=len(rv))
_tbl['rv_err'].unit = u.km/u.s
raw = {'t_arr': t_arr,
't_obj': t_obj,
'rv': rv,
'err': err,
'cov': cov}
return [dict(t=t_arr, rv=rv, rv_err=err),
(t_arr, rv, err),
(t_obj, rv, err),
(t_obj, _tbl['rv'], _tbl['rv_err']),
(t_arr, rv, cov),
(t_obj, rv, cov)], raw
def test_rvdata_init():
rnd = np.random.default_rng(42)
# Test valid initialization combos
# These should succeed:
valid_inputs, raw = get_valid_input(rnd)
for x in valid_inputs:
if isinstance(x, tuple):
RVData(*x)
else:
RVData(**x)
t_arr = raw['t_arr']
t_obj = raw['t_obj']
rv = raw['rv']
err = raw['err']
cov = raw['cov']
# With/without clean:
for i in range(1, 3): # skip time, because Time() catches nan values
inputs = list(valid_inputs[1])
arr = inputs[i].copy()
arr[0] = np.nan
inputs[i] = arr
data = RVData(*inputs)
assert len(data) == (len(arr)-1)
data = RVData(*inputs, clean=True)
assert len(data) == (len(arr)-1)
data = RVData(*inputs, clean=False)
assert len(data) == len(arr)
# With/without t0
data = RVData(t_arr, rv, err, t0=False)
assert data.t0 is None
data = RVData(t_arr, rv, err, t0=t_obj[3])
assert np.isclose(data.t0.mjd, t_obj[3].mjd)
# ------------------------------------------------------------------------
# Test expected failures:
# no units on something
with pytest.raises(TypeError):
RVData(t_arr, rv.value, err)
with pytest.raises(TypeError):
RVData(t_arr, rv, err.value)
# shapes must be consistent
with pytest.raises(ValueError):
RVData(t_obj[:-1], rv, err)
with pytest.raises(ValueError):
RVData(t_obj, rv[:-1], err)
with pytest.raises(ValueError):
RVData(t_obj, rv, err[:-1])
with pytest.raises(ValueError):
RVData(t_obj, rv, cov[:-1])
bad_cov = np.arange(8).reshape((2, 2, 2)) * (u.km/u.s)**2
with pytest.raises(ValueError):
RVData(t_obj, rv, bad_cov)
# t0 must be a Time instance
with pytest.raises(TypeError):
RVData(t_arr, rv, err, t0=t_arr[3])
@pytest.mark.parametrize("inputs",
get_valid_input()[0])
def test_data_methods(tmpdir, inputs):
# check that copy works
if isinstance(inputs, tuple):
data1 = RVData(*inputs)
else:
data1 = RVData(**inputs)
data2 = data1.copy()
data1._t_bmjd += 1.5
data1.rv *= 1.5
assert np.all(data2._t_bmjd != data1._t_bmjd)
assert np.all(data2.rv != data1.rv)
assert isinstance(data1.rv, u.Quantity)
assert isinstance(data1.rv_err, u.Quantity)
# check slicing
data2 = data1[:16]
assert len(data2) == 16
assert len(data2.t) == 16
assert len(data2.rv) == 16
assert len(data2.rv_err) == 16
# converting to a timeseries object:
ts = data1.to_timeseries()
assert isinstance(ts, TimeSeries)
filename = str(tmpdir / 'test.hdf5')
ts.write(filename, serialize_meta=True)
data2 = RVData.from_timeseries(filename)
assert u.allclose(data1.t.mjd, data2.t.mjd)
assert u.allclose(data1.rv, data2.rv)
assert u.allclose(data1.rv_err, data2.rv_err)
assert u.allclose(data1.t0.mjd, data2.t0.mjd)
# get phase from data object
phase1 = data1.phase(P=15.*u.day)
assert phase1.min() >= 0
assert phase1.max() <= 1
phase2 = data1.phase(P=15.*u.day, t0=Time(58585.24, format='mjd'))
assert not np.allclose(phase1, phase2)
# compute inverse variance
ivar = data1.ivar
assert ivar.unit == (1 / data1.rv.unit**2)
cov = data1.cov
assert cov.shape == (len(data1), len(data1))
def test_guess_from_table():
"""NOTE: this is not an exhaustive set of tests, but at least checks a few
common cases"""
for rv_name in ['rv', 'vr', 'radial_velocity']:
tbl = Table()
tbl['t'] = np.linspace(56423.234, 59324.342, 16) * u.day
tbl[rv_name] = np.random.normal(0, 1, len(tbl['t']))
tbl[f'{rv_name}_err'] = np.random.uniform(0.1, 0.2, len(tbl['t']))
data = RVData.guess_from_table(tbl, rv_unit=u.km/u.s)
assert np.allclose(data.t.utc.mjd, tbl['t'])
if HAS_FUZZY:
for rv_name in ['VHELIO', 'VHELIO_AVG', 'vr', 'vlos']:
tbl = Table()
tbl['t'] = np.linspace(56423.234, 59324.342, 16) * u.day
tbl[rv_name] = np.random.normal(0, 1, len(tbl['t']))
tbl[f'{rv_name}_err'] = np.random.uniform(0.1, 0.2, len(tbl['t']))
data = RVData.guess_from_table(tbl, rv_unit=u.km/u.s, fuzzy=True)
assert np.allclose(data.t.utc.mjd, tbl['t'])
tbl = Table()
tbl['t'] = np.linspace(2456423.234, 2459324.342, 16) * u.day
tbl['rv'] = np.random.normal(0, 1, len(tbl['t'])) * u.km/u.s
tbl['rv_err'] = np.random.uniform(0.1, 0.2, len(tbl['t'])) * u.km/u.s
data = RVData.guess_from_table(tbl)
assert np.allclose(data.t.utc.jd, tbl['t'])
data = RVData.guess_from_table(tbl, time_kwargs=dict(scale='tcb'))
assert np.allclose(data.t.tcb.jd, tbl['t'])
@pytest.mark.skipif(not HAS_MPL, reason='matplotlib not installed')
@pytest.mark.parametrize("inputs",
get_valid_input()[0])
def test_plotting(inputs):
# check that copy works
if isinstance(inputs, tuple):
data = RVData(*inputs)
else:
data = RVData(**inputs)
data.plot()
# style
data.plot(color='r')
# custom axis
fig, ax = plt.subplots(1, 1)
data.plot(ax=plt.gca())
# formatting
data.plot(rv_unit=u.m/u.s)
data.plot(rv_unit=u.m/u.s, time_format='jd')
data.plot(rv_unit=u.m/u.s, time_format=lambda x: x.utc.mjd)
data.plot(ecolor='r')
plt.close('all')
def test_multi_data():
import exoplanet.units as xu
import pymc3 as pm
rnd = np.random.default_rng(42)
    # Set up multiple valid data objects:
_, raw1 = get_valid_input(rnd=rnd)
data1 = RVData(raw1['t_obj'], raw1['rv'], raw1['err'])
_, raw2 = get_valid_input(rnd=rnd, size=8)
data2 = RVData(raw2['t_obj'], raw2['rv'], raw2['err'])
_, raw3 = get_valid_input(rnd=rnd, size=4)
data3 = RVData(raw3['t_obj'], raw3['rv'], raw3['err'])
prior1 = JokerPrior.default(1*u.day, 1*u.year,
25*u.km/u.s,
sigma_v=100*u.km/u.s)
# Object should return input:
multi_data, ids, trend_M = validate_prepare_data(data1,
prior1.poly_trend,
prior1.n_offsets)
assert np.allclose(multi_data.rv.value, data1.rv.value)
assert np.all(ids == 0)
assert np.allclose(trend_M[:, 0], 1.)
# Three valid objects as a list:
with pm.Model():
dv1 = xu.with_unit(pm.Normal('dv0_1', 0, 1.),
u.km/u.s)
dv2 = xu.with_unit(pm.Normal('dv0_2', 4, 5.),
u.km/u.s)
prior2 = JokerPrior.default(1*u.day, 1*u.year,
25*u.km/u.s,
sigma_v=100*u.km/u.s,
v0_offsets=[dv1, dv2])
datas = [data1, data2, data3]
multi_data, ids, trend_M = validate_prepare_data(datas,
prior2.poly_trend,
prior2.n_offsets)
assert len(np.unique(ids)) == 3
assert len(multi_data) == sum([len(d) for d in datas])
assert 0 in ids and 1 in ids and 2 in ids
assert np.allclose(trend_M[:, 0], 1.)
# Three valid objects with names:
datas = {'apogee': data1, 'lamost': data2, 'weave': data3}
multi_data, ids, trend_M = validate_prepare_data(datas,
prior2.poly_trend,
prior2.n_offsets)
assert len(np.unique(ids)) == 3
assert len(multi_data) == sum([len(d) for d in datas.values()])
assert 'apogee' in ids and 'lamost' in ids and 'weave' in ids
assert np.allclose(trend_M[:, 0], 1.)
# Check it fails if n_offsets != number of data sources
with pytest.raises(ValueError):
validate_prepare_data(datas,
prior1.poly_trend,
prior1.n_offsets)
with pytest.raises(ValueError):
validate_prepare_data(data1,
prior2.poly_trend,
prior2.n_offsets)
# Check that this fails if one has a covariance matrix
data_cov = RVData(raw3['t_obj'], raw3['rv'], raw3['cov'])
with pytest.raises(NotImplementedError):
validate_prepare_data({'apogee': data1, 'test': data2,
'weave': data_cov},
prior2.poly_trend, prior2.n_offsets)
with pytest.raises(NotImplementedError):
validate_prepare_data([data1, data2, data_cov],
prior2.poly_trend,
prior2.n_offsets)
|
[
"[email protected]"
] | |
aac470ddd0fd3c5e81d63ee500ed64596d8d060c
|
d9b2805a8b39f147bd77e35c8e96e0cbd5eaa726
|
/학교 공부/3학년1학기/운영체제 - 김정준/텀과제/osProject_피드.py
|
28c84c62698c8a349e6894f628b2c94152b2f5c3
|
[] |
no_license
|
LeeInHaeng/Study
|
ca8e3e2d4111dc3f742eefea541a67739d729e75
|
96bdb1d224702cebb8a6de6bbd596b075ee33f7b
|
refs/heads/master
| 2020-03-28T11:03:03.848316 | 2019-04-20T08:33:26 | 2019-04-20T08:33:26 | 148,172,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,975 |
py
|
import threading
import random
import time
customCnt = int(input("Enter the total number of customers: "))
bank = int(input("Enter the number of bank counters: "))
sem = threading.Semaphore(bank) # create a semaphore object limiting entry to `bank` threads
vip_sem = threading.Semaphore(1)
class RestrictedArea(threading.Thread):
    def run(self):
        # self.getName() : Thread-1, Thread-2 ....
        custom = self.getName().replace("Thread","Custom")
        CounselingTime = random.randint(5,10) # consultation takes 5-10 seconds
        msg = ('[-]%s in consultation...\n' % custom)
        msg2 = ('[+]%s consultation finished... / consultation time: %d sec\n' % (custom,CounselingTime))
        sem.acquire() # unlocked --> locked
        print(msg) # critical section: at most `bank` threads at a time
        time.sleep(CounselingTime)
        sem.release() # locked --> unlocked
        print(msg2)
class RestrictedAreaVIP(threading.Thread):
    def run(self):
        # self.getName() : Thread-1, Thread-2 ....
        vip = self.getName().replace("Thread","[[ VIP ]]")
        CounselingTime = random.randint(5,10) # consultation takes 5-10 seconds
        msg = ('[[[ [-]%s in consultation... ]]]\n' % vip)
        msg2 = ('[[[ [+]%s consultation finished... / consultation time: %d sec ]]]\n' % (vip,CounselingTime))
        vip_sem.acquire() # unlocked --> locked
        print(msg) # critical section: one VIP at a time
        time.sleep(CounselingTime)
        vip_sem.release() # locked --> unlocked
        print(msg2)
vipSecond = 0
vipCnt = 0
def vipCreate():
    vips = []
    global vipCnt
    global vipSecond
    global proEnd
    while proEnd:
        vipSecond += 1
        time.sleep(1)
        if vipSecond%10==0:
            print('[[[ A VIP has arrived! ]]]\n')
            vips.append(RestrictedAreaVIP())
            vips[vipCnt].start()
            vipCnt+=1
    for vip in vips:
        vip.join()
    print('%d [ VIP ] consultations finished' % (vipCnt))
customs = []
proEnd = True
start_time = time.time()
for i in range(customCnt): # one thread per customer
    customs.append(RestrictedArea())
    print(customs[i].getName().replace("Thread","Custom")+" took a number ticket")
th = threading.Thread(target=vipCreate)
th.start()
for cus in customs:
    cus.start() # start the thread
for cus in customs:
    cus.join() # wait for it to finish
    print(cus.getName().replace("Thread","Custom")+" left\n")
end_time = time.time()
proEnd = False
print('%d customer consultations finished' % customCnt)
print('Total consultation time: %f sec' % (end_time - start_time))
|
[
"[email protected]"
] | |
17382c82886a6f79476e82a3746c9219c595aa7c
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/product/models/res_partner.py
|
9d36b9fcfddf842d91ea1defac36b635e4999208
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,024 |
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, api
class Partner(models.Model):
_name = 'res.partner'
_inherit = 'res.partner'
# NOT A REAL PROPERTY !!!!
property_product_pricelist = fields.Many2one(
'product.pricelist', 'Pricelist', compute='_compute_product_pricelist',
inverse="_inverse_product_pricelist", company_dependent=False,
help="This pricelist will be used, instead of the default one, for sales to the current partner")
@api.multi
@api.depends('country_id')
def _compute_product_pricelist(self):
company = self.env.context.get('force_company', False)
res = self.env['product.pricelist']._get_partner_pricelist_multi(self.ids, company_id=company)
for p in self:
p.property_product_pricelist = res.get(p.id)
@api.one
def _inverse_product_pricelist(self):
pls = self.env['product.pricelist'].search(
[('country_group_ids.country_ids.code', '=', self.country_id and self.country_id.code or False)],
limit=1
)
default_for_country = pls and pls[0]
actual = self.env['ir.property'].get('property_product_pricelist', 'res.partner', 'res.partner,%s' % self.id)
# update at each change country, and so erase old pricelist
if self.property_product_pricelist or (actual and default_for_country and default_for_country.id != actual.id):
# keep the company of the current user before sudo
self.env['ir.property'].with_context(force_company=self.env.user.company_id.id).sudo().set_multi(
'property_product_pricelist',
self._name,
{self.id: self.property_product_pricelist or default_for_country.id},
default_value=default_for_country.id
)
def _commercial_fields(self):
return super(Partner, self)._commercial_fields() + ['property_product_pricelist']
|
[
"[email protected]"
] | |
cc9344aa970edeb891170348018e4f20b39b2bc2
|
1581bacbb7e7ed2f97aa1fb903ca0cf1a351be14
|
/lib/framework/__init__.py
|
e7df70e79c346c3778f3fc2b43c63503b14059ef
|
[] |
no_license
|
COOHU-Kr/SJVA3
|
f5b7287aaa658287b003300e1973b63d2f6ac567
|
ef68c085d980d0eb395da21f89cf999eeca8f980
|
refs/heads/main
| 2023-04-12T08:58:29.074902 | 2021-05-16T12:30:02 | 2021-05-16T12:30:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,488 |
py
|
# -*- coding: utf-8 -*-
version = '0.2.20.10'
#########################################################
# python
import os
import sys
import platform
path_app_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
path_data = os.path.join(path_app_root, 'data')
flag_system_loading = False
from datetime import datetime, timedelta
import json
import traceback
# third-party
from flask import Flask, redirect, render_template, Response, request, jsonify, send_file, send_from_directory, abort, Markup
from flask_sqlalchemy import SQLAlchemy
from flask_socketio import SocketIO, emit
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
#from celery import Celery
# sjva 공용
from .init_args import args
from .py_version_func import *
from framework.class_scheduler import Scheduler
from framework.logger import get_logger
from .menu import init_menu
from .user import User
from .init_web import jinja_initialize
from .init_etc import check_api, make_default_dir, pip_install, config_initialize
#########################################################
# App 시작
#########################################################
## 기본디렉토리 생성
make_default_dir(path_data)
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
try:
# Global
logger.debug('Path app root : %s', path_app_root)
logger.debug('Path app data : %s', path_data)
logger.debug('Platform : %s', platform.system())
app = Flask('sjva')
#try:
# from flask_restful import Api
# api = Api(app)
#except:
# logger.debug('NOT INSTALLED FLASK_RESTFUL')
app.secret_key = os.urandom(24)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data/db/sjva.db?check_same_thread=False'
app.config['SQLALCHEMY_BINDS'] = {'sjva':'sqlite:///data/db/sjva.db'}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['config'] = {}
config_initialize('start')
pip_install()
db = SQLAlchemy(app, session_options={"autoflush": False})
scheduler = Scheduler(args)
#socketio = SocketIO(app, cors_allowed_origins="*") #, async_mode='gevent')
if args is not None and args.use_gevent == False:
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading')
else:
socketio = SocketIO(app, cors_allowed_origins="*") #, async_mode='gevent')
from flask_cors import CORS
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
exit_code = -1
# app route가 되어 있는데 import 해야지만 routing이 됨
from .log_viewer import *
from .manual import *
# 추후 삭제
USERS = {"sjva"+version : User("sjva"+version, passwd_hash="sjva"+version),}
# System plugin import
##########################################
from .init_celery import celery
import framework.common.celery
##########################################
# 시스템 플러그인
# 시스템 DB부터 만들자.
import system
from system.model import ModelSetting as SystemModelSetting
# epg 없이 klive 만 있고 db 파일이 없을 때 아예 다른 모듈이 로딩안되는 문제 발생
# klive에서 epg 칼럼을 참조해서 그러는것 같음. 방어코드이나 확인못함
try:
db.create_all()
except Exception as exception:
logger.error('CRITICAL db.create_all()!!!')
logger.error('Exception:%s', exception)
logger.error(traceback.format_exc())
config_initialize('auth')
system.plugin_load()
flag_system_loading = True # 로그레벨에서 사용. 필요한가??
if app.config['config']['run_by_init_db']:
logger.debug('================================================')
logger.debug('Run by init db.. exit')
sys.exit()
app.register_blueprint(system.blueprint)
config_initialize('system_loading_after')
################################################################
# 아래는 코드 동작.. 위는 import만
plugin_menu = []
plugin_menu.append(system.menu)
plugin_instance_list = {}
jinja_initialize(app)
######################################################
# 플러그인
system.LogicPlugin.custom_plugin_update()
from .init_plugin import plugin_init
plugin_init()
logger.debug('### plugin loading completed')
#####################################################
# 메뉴
init_menu(plugin_menu)
system.SystemLogic.apply_menu_link()
logger.debug('### menu loading completed')
app.config['config']['port'] = 0
if sys.argv[0] == 'sjva.py' or sys.argv[0] == 'sjva3.py':
try:
app.config['config']['port'] = SystemModelSetting.get_int('port')
if app.config['config']['port'] == 19999 and app.config['config']['running_type'] == 'docker' and not os.path.exists('/usr/sbin/nginx'):
SystemModelSetting.set('port', '9999')
app.config['config']['port'] = 9999
except:
app.config['config']['port'] = 9999
if args is not None:
if args.port is not None:
app.config['config']['port'] = args.port
app.config['config']['repeat'] = args.repeat
app.config['config']['use_celery'] = args.use_celery
if platform.system() == 'Windows':
app.config['config']['use_celery'] = False
app.config['config']['use_gevent'] = args.use_gevent
logger.debug('### config ###')
logger.debug(json.dumps(app.config['config'], indent=4))
logger.debug('### LAST')
logger.debug('### PORT:%s', app.config['config']['port'])
logger.debug('### Now you can access SJVA by webbrowser!!')
except Exception as exception:
logger.error('Exception:%s', exception)
logger.error(traceback.format_exc())
# 반드시 마지막에
#import init_route
from .init_route import *
from .util import Util
try:
from tool_expand import TorrentProcess
TorrentProcess.server_process(None, category='None')
except:
pass
"""
try:
from lib_metadata import *
except:
pass
"""
|
[
"[email protected]"
] | |
c132e2ae4180e29a9a299525b1e4ec34899ea39a
|
d2eaacf8189655051d0d078e39a4d924df215b96
|
/termtables/__about__.py
|
699d5d53309c216bc4352884aae952bc2dc24cd2
|
[
"MIT"
] |
permissive
|
jayvdb/termtables
|
4fdee11ec5b713c542c0d26aa00be0103db55787
|
5508afa6d813081355d95d80f5471f2d9ada738a
|
refs/heads/master
| 2020-08-20T12:01:02.779004 | 2019-09-26T08:48:31 | 2019-09-26T08:48:31 | 216,020,488 | 0 | 0 |
MIT
| 2019-10-18T12:36:25 | 2019-10-18T12:36:22 | null |
UTF-8
|
Python
| false | false | 267 |
py
|
__author__ = "Nico Schlömer"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2019 {} <{}>".format(__author__, __email__)
__license__ = "License :: OSI Approved :: MIT License"
__version__ = "0.1.0"
__status__ = "Development Status :: 4 - Beta"
|
[
"[email protected]"
] | |
6785637051cfb8ea05984b5fe150317fe94fb5fb
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/zRm6YDfQHoesdc3rb_23.py
|
f98bf044cde1ef882771ddac94ca7a7d1163f5aa
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
"""
Let there be a square matrix, where each square is a rectangle, and a
combination of more squares are also rectangles. To find the number of
rectangles, Pete sat down and started counting... but that's highly
inefficient.
Create a function that takes the order of the matrix as input and returns the
number of rectangles in them.
### Examples
rectangles(1) ➞ 1
rectangles(2) ➞ 9
rectangles(3) ➞ 36
### Notes
* The input will always be an integer.
* Number of rectangles are given by: `((n(n+1))/2)^2`
* Watch the video listed in the **Resources** tab to get three different formulas.
"""
def rectangles(step):
    # ((n*(n+1))/2)**2 from the notes above, kept in integer arithmetic so the result is an int
    return (step * (step + 1) // 2) ** 2
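# The docstring examples as runnable checks:
if __name__ == "__main__":
    assert rectangles(1) == 1
    assert rectangles(2) == 9
    assert rectangles(3) == 36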
|
[
"[email protected]"
] | |
3d72b403ae7bfb81b3d7bc29b76b7c477254a591
|
d25a8b0e9f8a4f48504a49e094085f92d8d8e08a
|
/doc_db/db_utility.py
|
7e8a6fd740a58b26dbba23e873fb50e127b99381
|
[
"MIT"
] |
permissive
|
William-Lake/doc_db
|
c7ad963bc0ff3a75b9a690bf44025e2aa6d1773b
|
022b3e08d10d104fd838c7a094091e78d771ebe1
|
refs/heads/master
| 2020-03-31T01:34:59.815672 | 2019-01-30T18:36:39 | 2019-01-30T18:36:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 527 |
py
|
import logging
from peewee import *
from database import *
class DatabaseUtility(object):
def __init__(self):
self.logger = logging.getLogger()
self.logger.info('DatabaseUtility Loaded')
    def get_saved_doc_names(self): return [doc.name for doc in Doc.select(Doc.name)]
def get_doc_by_name(self,name): return Doc.select().where(Doc.name == name)
def save_docs(self,name_data_dict):
for name in name_data_dict.keys(): Doc.create(name=name,data=name_data_dict[name])
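# Hypothetical usage sketch (assumes the peewee `Doc` model from `database`
# defines `name` and `data` fields and that the database connection is set up):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    util = DatabaseUtility()
    util.save_docs({'notes.txt': b'hello'})
    print(util.get_saved_doc_names())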
|
[
"noreply"
] |
noreply
|
45392b0540f4de029b377456a799b8585ff10c7b
|
764ce53fd708bb3f81d67cc9a2366265c9a685b9
|
/Week1/Factors.py
|
aa00fbffd99894f7d3cca1da5f13b82509bf51a8
|
[] |
no_license
|
Vaishnavi-Gajinkar/Bridgelabz
|
3d17b8399432ac5643059e822ccad9a90f919e9f
|
e51551ab675dbb5444ba222cc88ac05fbeab49d2
|
refs/heads/master
| 2020-12-28T02:45:18.517627 | 2020-03-09T13:42:37 | 2020-03-09T13:42:37 | 238,153,294 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 117 |
py
|
print("Enter a number")
num=int(input())
l=[]
for i in range(1, num+1):
if num%i==0:
l.append(i)
print(l)
|
[
"[email protected]"
] | |
d4128909b323bf4c9ffdb7a50cb441e3b45941ec
|
2eff7fdb6b4d61341c66e6afbf1ba63c67394d72
|
/.history/codes_20201115144555.py
|
079c1938cbf7e8425a8f965983bbb47a1a63132e
|
[] |
no_license
|
E-STAT/speech_datacamp
|
6b07390954b733d78768b24e18002579d744b58a
|
8999629b0053e8662fc54ebb161f3a8a4f74d09d
|
refs/heads/master
| 2023-01-21T06:13:40.909833 | 2020-11-23T12:32:42 | 2020-11-23T12:32:42 | 315,310,078 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,206 |
py
|
import wave
import numpy as np
import matplotlib.pyplot as plt
# Create audio file wave object
good_morning = wave.open('good_morning.wav', 'r')
# Read all frames from wave object
signal_gm = good_morning.readframes(-1)
# View first 10
print(signal_gm[:10])
########################Converting Soundwave byte to integers##########
# Convert good morning audio bytes to integers
soundwave_gm = np.frombuffer(signal_gm, dtype='int16')
# View the first 10 sound wave values
print(soundwave_gm[:10])
# Get the sound wave frame rate
framerate_gm = good_morning.getframerate()
# Find the sound wave timestamps
time_gm = np.linspace(start=0,
                      stop=len(soundwave_gm) / framerate_gm,
                      num=len(soundwave_gm))
# Print the first 10 timestamps
print(time_gm[:10])
####### Load the Good Afternoon clip the same way (the course exercise ships a
####### good_afternoon.wav next to the morning file; assumed available here)
good_afternoon = wave.open('good_afternoon.wav', 'r')
soundwave_ga = np.frombuffer(good_afternoon.readframes(-1), dtype='int16')
time_ga = np.linspace(start=0,
                      stop=len(soundwave_ga) / good_afternoon.getframerate(),
                      num=len(soundwave_ga))
####### Plotting the waves
# Setup the title and axis titles
plt.title('Good Afternoon vs. Good Morning')
plt.ylabel('Amplitude')
plt.xlabel('Time (seconds)')
# Add the Good Afternoon data to the plot
plt.plot(time_ga, soundwave_ga, label='Good Afternoon')
# Add the Good Morning data to the plot
plt.plot(time_gm, soundwave_gm, label='Good Morning',
# Set the alpha variable to 0.5
alpha=0.5)
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
0d8aded2a3e9f49e4ad86a5be226ec743735d1d2
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dlf/huaweicloudsdkdlf/v1/model/list_resources_request.py
|
5716faaafbd2ec1d1739014329eae634757f0efb
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 3,110 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListResourcesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'workspace': 'str'
}
attribute_map = {
'workspace': 'workspace'
}
def __init__(self, workspace=None):
"""ListResourcesRequest
The model defined in huaweicloud sdk
        :param workspace: Workspace ID
:type workspace: str
"""
self._workspace = None
self.discriminator = None
if workspace is not None:
self.workspace = workspace
@property
def workspace(self):
"""Gets the workspace of this ListResourcesRequest.
        Workspace ID
:return: The workspace of this ListResourcesRequest.
:rtype: str
"""
return self._workspace
@workspace.setter
def workspace(self, workspace):
"""Sets the workspace of this ListResourcesRequest.
        Workspace ID
:param workspace: The workspace of this ListResourcesRequest.
:type workspace: str
"""
self._workspace = workspace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListResourcesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
64f66b71e952e997146063c90f70b2ed49cfa209
|
38a9e2780ac8b800c336207a5c0a621eb1277a53
|
/src/olympus/analyzer/__init__.py
|
839899b2886be83bce8b5b9e17bc248f3c111b4b
|
[
"MIT"
] |
permissive
|
priyansh-1902/olympus
|
2454850413bb0562a1bfe20ab35fa7e770367323
|
f57ad769918c0d5d805c439ab5ffbd180af698fa
|
refs/heads/main
| 2023-06-21T05:58:49.118264 | 2021-08-07T22:19:41 | 2021-08-07T22:19:41 | 342,454,516 | 0 | 0 |
MIT
| 2021-08-07T22:19:41 | 2021-02-26T03:43:08 |
Python
|
UTF-8
|
Python
| false | false | 54 |
py
|
#!/usr/bin/env python
from .analyzer import Analyzer
|
[
"[email protected]"
] | |
c9e642a44b968079964309823b5b11beb0050205
|
905020fce75b4b63517ec31c601e721f5c260cd1
|
/Тестирование is_prime().py
|
f87456f26c0167831c8087dac86c87e727c705f9
|
[] |
no_license
|
Dimaed90800/Python_Y
|
7858ad46309281a89c5c1e83a0f09030996182a4
|
04092b854605cb05df439eeeb52003e585bb5a29
|
refs/heads/main
| 2023-01-24T04:11:17.858281 | 2020-11-17T20:42:45 | 2020-11-17T20:42:45 | 313,731,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 569 |
py
|
from yandex_testing_lesson import is_prime
# Assumes is_prime() takes an integer, returns a boolean, and raises
# ValueError for numbers below 2 (as the original comparisons suggest).
ans = 'YES'
prime_nums = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
              83, 89, 97, 101, 103, 107, 109]
for n in prime_nums:
    if not is_prime(n):
        ans = 'NO'
composite_nums = [6, 9, 144, 1075, 6111]
for n in composite_nums:
    if is_prime(n):
        ans = 'NO'
for n in (0, 1):
    try:
        is_prime(n)
        ans = 'NO'  # a ValueError was expected here
    except ValueError:
        pass
print(ans)
|
[
"[email protected]"
] | |
9036422dcea82e711dfe2869f7bd5fd22ae042fc
|
38fb82ff9f5ecee937cb950889335402aba2c7a6
|
/route/migrations/0003_suggest_description.py
|
18fdd3df086e6356dcde63674aefc2a8cd5563c2
|
[] |
no_license
|
squallcs12/vivu2017
|
abe9c42cfd831de3411c1b986b6d5c4c4099808a
|
c6a3f37ee238464c9bf9de61a1c6e9f5be21f40f
|
refs/heads/master
| 2021-01-11T22:36:21.289404 | 2017-02-02T08:11:19 | 2017-02-02T08:11:19 | 78,998,307 | 0 | 0 | null | 2017-02-02T08:11:20 | 2017-01-15T04:55:01 |
Python
|
UTF-8
|
Python
| false | false | 486 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('route', '0002_auto_20170124_0431'),
]
operations = [
migrations.AddField(
model_name='suggest',
name='description',
field=models.TextField(default=''),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
06080b3fb0a23b3b4bc63e89f195003d3c5f50f8
|
9a9088713c917ac47c0b8713d6969b2cfcdbadac
|
/leetcode_python/829.Consecutive_Numbers_Sum.py
|
2b564ef821aabab0906660d0a0f99482b9bb0ad7
|
[] |
no_license
|
zihuaweng/leetcode-solutions
|
615fdcb9178b19764b4d30bcfe65a9f785e77270
|
e431ff831ddd5f26891e6ee4506a20d7972b4f02
|
refs/heads/master
| 2023-02-06T03:58:26.413711 | 2020-12-26T05:23:03 | 2020-12-26T05:23:03 | 311,418,790 | 4 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
#!/usr/bin/env python3
# coding: utf-8
# Time complexity: O()
# Space complexity: O()
# https://leetcode.com/problems/consecutive-numbers-sum/
# https://leetcode.com/problems/consecutive-numbers-sum/discuss/129015/5-lines-C%2B%2B-solution-with-detailed-mathematical-explanation.
class Solution:
    def consecutiveNumbersSum(self, N: int) -> int:
        # N = (a+1) + (a+2) + ... + (a+k)  =>  N - k*(k+1)/2 = k*a with a >= 0,
        # so count every k whose remainder is divisible by k (k = 1 always works).
        # k*(k+1)/2 <= N bounds k by roughly sqrt(2*N), not sqrt(N): the original
        # range(2, int(N**0.5+1)) missed e.g. 15 = 1+2+3+4+5.
        count = 1
        for k in range(2, int((2 * N) ** 0.5) + 1):
            if (N - k * (k + 1) // 2) % k == 0:
                count += 1
        return count
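# Brute-force cross-check of the divisibility trick above (small N only):
def consecutive_numbers_sum_naive(N: int) -> int:
    count = 0
    for k in range(1, N + 1):
        rest = N - k * (k + 1) // 2  # N minus (1 + 2 + ... + k); must equal k*a, a >= 0
        if rest < 0:
            break
        if rest % k == 0:
            count += 1
    return count
# consecutive_numbers_sum_naive(15) == 4  ->  15, 7+8, 4+5+6, 1+2+3+4+5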
|
[
"[email protected]"
] | |
4b3fe77e2e0a1432c7500cff79ec5504e9928a0f
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayOpenSearchboxDowngradePreconsultModel.py
|
6939163275bc9a2554845ced061bb3ab165d749c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 883 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenSearchboxDowngradePreconsultModel(object):
def __init__(self):
self._box_id = None
@property
def box_id(self):
return self._box_id
@box_id.setter
def box_id(self, value):
self._box_id = value
def to_alipay_dict(self):
params = dict()
if self.box_id:
if hasattr(self.box_id, 'to_alipay_dict'):
params['box_id'] = self.box_id.to_alipay_dict()
else:
params['box_id'] = self.box_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenSearchboxDowngradePreconsultModel()
if 'box_id' in d:
o.box_id = d['box_id']
return o
|
[
"[email protected]"
] | |
5170e8a9d1354e33d0bbb1a8e191b448f5397bdd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03167/s315916496.py
|
8af060443edca9f35df49dadab2a2d70859c258b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
#dt = {} for i in x: dt[i] = dt.get(i,0)+1
import sys;input = sys.stdin.readline
inp,ip = lambda :int(input()),lambda :[int(w) for w in input().split()]
M = 10**9+7
h,w = ip()
grid = [input().strip() for i in range(h)]
dp = [[0]*w for i in range(h)]
dp[0][0] = 1
for i in range(h):
for j in range(w):
if i-1 >= 0 and grid[i-1][j] == '.':
dp[i][j] += dp[i-1][j]
if j-1 >= 0 and grid[i][j-1] == '.':
dp[i][j] += dp[i][j-1]
dp[i][j] %= M
print(dp[-1][-1]%M)
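# Illustrative check (added): with h = w = 2 and all cells '.', dp ends as
# [[1, 1], [1, 2]] and the program prints 2 (right-then-down, down-then-right).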
|
[
"[email protected]"
] | |
568313b9e57d494a2f69cf8e3b087d4be843b081
|
d0fe291905e1be001b3407c38e4d9702e94d7d42
|
/ar/register_device.py
|
c5ee21483d7a3f300f479fe1124ef28a36308029
|
[] |
no_license
|
storvik/pythonremote
|
f508a4913fe653e51006a8456bbbf5b0aced9fd7
|
a880a585a70c8a853b736ecb09f0712c96f2614b
|
refs/heads/master
| 2021-01-01T06:33:18.628235 | 2015-09-21T04:46:34 | 2015-09-21T04:46:34 | 25,593,912 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,210 |
py
|
import os
import string
import requests
import urllib
from .gcm import Gcm_req
from .color import color, green, red, yellow
from .load_device import load_device
from .load_computer import load_computer
from .unshorten_url import unshorten_url
# Register new device to autoremotedevices.txt
def register_device(config_path, host_name):
if os.path.isfile(config_path + 'autoremotedevices.txt'):
print(color(green,"Found registered devices. Continuing server startup.."))
else:
print(color(yellow,"Did not find any devices."))
answr = input(color(yellow,"You want to add a device? [y/n] "))
if answr in ['y','yes','Y','YES']:
register_newdevice(config_path, host_name)
else:
print(color(red,"autoremote is useless with no devices registered. Aborting..."))
exit(-1)
# Register new device
def register_newdevice(config_path, host_name):
fd = open(config_path + 'autoremotedevices.txt', 'a+') # Opening device file
# Todo: Check for existing name or key
name = input("Enter name for new device: ")
key = input("Enter personal key or characters after goo.gl/: ")
if len(key) > 5:
key_raw = unshorten_url('https://goo.gl/' + key)
if key_raw == key:
print(color(red,"Could not unshorten URL. Try with regular key if problem continues.."))
answr = input(color(yellow,"You want to try again? [y/n] "))
else:
key = key_raw.split("key=")[1]
register_sendtodevice(config_path, key)
fd.write(name+"\n"+key+"\n")
print(color(green,"Successfully added "+name+" to device list.."))
answr = input(color(yellow,"You want to add another device? [y/n] "))
else:
register_sendtodevice(config_path, key)
fd.write(name+"\n"+key+"\n")
print(color(green,"Successfully added "+name+" to device list.."))
answr = input(color(yellow,"You want to add another device? [y/n] "))
    fd.close()
if answr in ['y','yes','Y','YES']:
register_newdevice(config_path, host_name)
# Register computer on device
def register_sendtodevice(config_path, key):
computer = load_computer(config_path)
gcm = Gcm_req(key, computer["sender"], computer) # GCM register device message
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
r = requests.post("https://autoremotejoaomgcd.appspot.com/sendrequest", data=urllib.parse.urlencode(gcm.__dict__), headers=headers)
if r.text == "OK": # If message is sent
print(color(green,"Register device request successfully sent to device!"))
else:
print(color(red,"Couldn't send request. Aborting..."))
exit(-1)
def register_updatedevice(config_path):
    if os.path.isfile(config_path + 'autoremotedevices.txt'):
devlist = load_device(config_path)
for i in range(1, len(devlist)-1, 2):
register_sendtodevice(config_path,devlist[i])
print(color(green,"Updated information on devices.."))
else:
print(color(yellow,"No 'autoremotedevices.txt', nothing done.."))
|
[
"="
] |
=
|
09a981fcec8dd6271c17d600235d3a7be9caba06
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_325/ch42_2020_03_26_19_44_19_862731.py
|
030854db3859bd46deaf4ce4de9d234c5a682e3a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
palavra = input("Digite sua palavra: ")
lista = []
while palavra != "fim":
    if palavra[0] == "a":
        lista.append(palavra)
    palavra = input("Digite sua palavra: ")
|
[
"[email protected]"
] | |
2cc97c38877aae8391444fa04b9a9e8252833132
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CommonScripts/Scripts/ExtractHTMLTables/ExtractHTMLTables.py
|
992e12ef7d173e3f43d201634fe78157ba9c1345
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 |
MIT
| 2023-09-14T20:55:24 | 2016-06-06T12:17:02 |
Python
|
UTF-8
|
Python
| false | false | 2,217 |
py
|
import demistomock as demisto # noqa: F401
from bs4 import BeautifulSoup
from CommonServerPython import * # noqa: F401
def extract_html_table(html, indexes):
soup = BeautifulSoup(html, 'html.parser')
tables = []
for index, tab in enumerate(soup.find_all('table')):
if len(indexes) > 0 and index not in indexes and str(index) not in indexes:
continue
table = []
headers = []
# Check if there are headers and use them
for th in tab.find_all('th'):
headers.append(th.text)
for tr in tab.find_all('tr'):
tds = tr.find_all('td')
# This is a data row and not header row
if len(tds) > 0:
# Single value in a table - just create an array of strings ignoring header
if len(tds) == 1:
table.append(tds[0].text)
# If there are 2 columns and no headers, treat as key-value (might override values if same key in first column)
elif len(tds) == 2 and len(headers) == 0:
if type(table) == list:
table = {} # type: ignore
table[tds[0].text] = tds[1].text
else:
row = {}
if len(headers) > 0:
for i, td in enumerate(tds):
row[headers[i]] = td.text
else:
for i, td in enumerate(tds):
row['cell' + str(i)] = td.text
table.append(row)
if len(table) > 0:
tables.append(table)
if len(tables) > 0:
        return {
            'Type': entryTypes['note'],
            'Contents': 'Found {} tables in HTML.'.format(len(tables)),
            'ContentsFormat': formats['text'],
            'EntryContext': {'HTMLTables': tables if len(tables) > 1 else tables[0]}
        }
else:
return 'Did not find tables in HTML.'
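# Illustrative example (added; hypothetical input, not part of the Demisto
# runtime): extract_html_table('<table><tr><td>k</td><td>v</td></tr></table>', [])
# returns an entry whose EntryContext is {'HTMLTables': {'k': 'v'}}, since a
# two-column table with no header row is treated as key-value pairs.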
def main():
html = demisto.getArg('html')
indexes = argToList(demisto.getArg('indexes'))
demisto.results(extract_html_table(html, indexes))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
[
"[email protected]"
] | |
785c3cc0af72cc2637877e9c6a612a0a81a3389b
|
30eb942d849dab9250bbd541a8d7128d15be8556
|
/host/EcoBTCentralManagerWorker.py
|
20eb62f8ea031115606c95ce53db5940223fcd9f
|
[] |
no_license
|
cjhuo/Lab_projects-EcoBT-HOST-N-SERVER
|
55e42b1e4d5f88bc978f5b6c07ab3798626a88fa
|
396cb823ed74552985f4afa157fe3887afe48b65
|
refs/heads/master
| 2020-06-06T12:25:23.111232 | 2014-01-31T07:03:02 | 2014-01-31T07:03:02 | 26,940,140 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,181 |
py
|
'''
@summary:
    monitor the input queue where commands sent from the UI are stored, and dispatch each one to the worker it belongs to.
    A manager worker has the following members:
instance of central manager,
instance of central manager delegate,
list of discovered peripheral workers
'''
from Foundation import *
#from PyObjCTools import AppHelper
from IOBluetooth import *
from objc import *
from PyObjCTools import AppHelper
from Queue import Queue
import time
from EcoBTWorker import EcoBTWorker
from EcoBTCentralManagerDelegateWorker import EcoBTCentralManagerDelegateWorker
from EcoBTPeripheralWorker import EcoBTPeripheralWorker
from Peripheral import Peripheral
class EcoBTCentralManagerWorker(NSObject, EcoBTWorker):
def init(self):
EcoBTWorker.__init__(self)
self.peripheralWorkers = []
# initialize CMdelegate worker
self.delegateWorker = EcoBTCentralManagerDelegateWorker()
self.delegateWorker.setEcoBTWorker(self)
self.delegateWorker.start()
self.pNum = 0 # this number is for each peripheral to identify themselves
'''
0: if down,
1: if up but not startScan,
2: up and startScan,
3: has node connected, still scanning
4: stopScan, but has peripheral connected
'''
self.state = 0
# initialize manager with delegate
NSLog("Initialize CBCentralManager Worker")
self.manager = CBCentralManager.alloc().initWithDelegate_queue_(self, nil)
return self
def setSockets(self, sockets):
self.sockets = sockets
self.delegateWorker.setGlobalSockets(sockets)
def stop(self): # clean up
NSLog("Cleaning Up")
for w in self.peripheralWorkers:
w.delegateWorker.getQueue().put('stop')
w.delegateWorker.join()
self.delegateWorker.getQueue().put('stop')
self.delegateWorker.join()
def connectPeripheral(self, peripheral):
#NSLog("Trying to connnect peripheral %@", peripheral._.UUID)
options = NSDictionary.dictionaryWithObject_forKey_(
NSNumber.numberWithBool_(YES),
CBConnectPeripheralOptionNotifyOnDisconnectionKey
)
self.manager.connectPeripheral_options_(peripheral, options)
def cancelPeripheralConnection(self, peripheral):
if type(peripheral) == Peripheral:
self.manager.cancelPeripheralConnection_(peripheral.instance)
NSLog("DISCONNECTING FROM PERIPHERAL %@", peripheral.address)
else:
self.manager.cancelPeripheralConnection_(peripheral)
def cancelAllConnectionExcept(self, peripheral):
for worker in self.peripheralWorkers:
if worker.peripheral.address != peripheral.address:
self.cancelPeripheralConnection(worker.peripheral)
def cancelAllConnection(self):
for worker in self.peripheralWorkers:
self.cancelPeripheralConnection(worker.peripheral)
def findPeripheralWorkerByAddress(self, address):
for worker in self.peripheralWorkers:
if worker.peripheral.address == address:
return worker
return None
def startScan(self):
NSLog("STARTING SCAN")
options = NSDictionary.dictionaryWithObject_forKey_(
NSNumber.numberWithBool_(YES),
CBCentralManagerScanOptionAllowDuplicatesKey
)
self.manager.scanForPeripheralsWithServices_options_(
nil,
#[CBUUID.UUIDWithString_(u"180D"), CBUUID.UUIDWithString_(u"7780"), CBUUID.UUIDWithString_(u"7770")],
options
)
def stopScan(self):
NSLog("stop scan")
self.manager.stopScan()
def sendState(self):
data = {'type': 'state',
'value': self.state}
self.delegateWorker.getQueue().put(data)
def sendPeripheralList(self):
data = {'type': 'peripheralList',
'value': []
}
for worker in self.peripheralWorkers:
p = {'name': worker.peripheral.name,
'rssi': worker.peripheral.rssi,
'number': worker.peripheral.number,
'address': worker.peripheral.address,
'type': worker.peripheral.type
}
data['value'].append(p)
self.delegateWorker.getQueue().put(data)
def sendFailMessage(self, message):
msg = {
'type': 'message',
'value': message
}
self.delegateWorker.getQueue().put(msg)
# CBCentralManager delegate methods
def centralManagerDidUpdateState_(self, central):
ble_state = central._.state
        if ble_state == CBCentralManagerStateUnknown:
            NSLog("state unknown")
            self.state = 0
            self.sendFailMessage("state unknown")
elif ble_state == CBCentralManagerStateResetting:
NSLog("resetting")
self.state = 0
self.sendFailMessage("resetting")
elif ble_state == CBCentralManagerStateUnsupported:
NSLog("BLE is not supported")
self.state = 0
self.sendFailMessage("BLE is not supported")
self.sendState()
#AppHelper.stopEventLoop()
elif ble_state == CBCentralManagerStateUnauthorized:
NSLog("unauthorized")
self.state = 0
self.sendFailMessage("unauthorized")
elif ble_state == CBCentralManagerStatePoweredOff:
NSLog("power off")
self.state = 0
self.sendFailMessage("power off")
elif ble_state == CBCentralManagerStatePoweredOn:
NSLog("ble is ready!!")
self.state = 1
self.sendState()
'''
# for test purpose
self.startScan()
self.state = 2
self.sendState()
'''
#self.startScan()
else:
NSLog("Can't get Central Manager's state!")
raise Exception
'''
Invoked when the central discovers a EcoBT node while scanning.
add peripheral list and send to UI
'''
def centralManager_didDiscoverPeripheral_advertisementData_RSSI_(self,
central,
peripheral,
                                                                     advertisement_data,
                                                                     rssi):
        NSLog("Found Peripheral %@ %@", peripheral._.name, rssi)
        NSLog("%@", advertisement_data)
# update self's state and send to UI
self.state = 3
self.sendState()
# check if the peripheral has already been added to the list
found = self.findWorkerForPeripheralInstance(peripheral)
if found == False:
            # initialize peripheral worker when the peripheral is added to the list
worker = EcoBTPeripheralWorker.alloc().init()
worker.setSockets(self.sockets)
#print 'Peripheral socket: ', worker.sockets
worker.setPeripheral(Peripheral(peripheral, peripheral._.name, rssi, self.pNum))
self.pNum += 1
self.peripheralWorkers.append(worker)
# for test
self.connectPeripheral(peripheral)
self.startScan()
#send peripherals list to UI !!!!!!!
#print "Connect, stopScan"
#self.stopScan()
    def centralManager_didRetrievePeripherals_(self, central, peripherals):
        NSLog("Retrieve peripherals")
def centralManager_didConnectPeripheral_(self, central, peripheral):
# Update UI
NSLog("Connected to peripheral %@", peripheral._.name)
#delegate.sockets = self.sockets
NSLog("number of peripherals: %@", len(self.peripheralWorkers))
w = self.findWorkerForPeripheralInstance(peripheral)
if w != False:
# start peripheral's delegate worker only when it's connected
w.peripheral.instance.setDelegate_(w)
w.delegateWorker.start()
# for test
NSLog("DISCOVERING SERVICES FOR NODE %@", w.peripheral.address)
w.discoverServices()
else:
NSLog("error, peripheral hasn't been added to watch list")
raise Exception
#peripheral.discoverServices_(None)
'''
lost connection from EcoBT node
'''
def centralManager_didDisconnectPeripheral_error_(self,
central,
peripheral,
error):
worker = self.findWorkerForPeripheralInstance(peripheral)
# dispose worker and remove peripheral
if worker != False:
worker.stop()
self.peripheralWorkers.remove(worker)
NSLog("Disconnect from Peripheral No %@", worker.peripheral.number)
self.sendFailMessage("Disconnect from Peripheral %s" % worker.peripheral.name)
else:
NSLog("Didn't find the peripheral to remove from peripherhal list!")
# update UI
self.sendPeripheralList()
#AppHelper.stopEventLoop()
#sys.exit()
def centralManager_didFailToConnectPeripheral_error_(self,
central,
peripheral,
error):
NSLog("Fail to Connect")
def findWorkerForPeripheralInstance(self, peripheralInstance):
for w in self.peripheralWorkers:
if w.peripheral.instance == peripheralInstance:
return w
return False # not found
|
[
"[email protected]"
] | |
d6309c0542fa1789c852ee9441c523c99edc7a90
|
bf57ccabb586e624ec33f0aa2cdce298c99459e1
|
/python/lsst/meas/extensions/simpleShape/__init__.py
|
185b138198bdc79b230f2ed8bc985197b1c91488
|
[] |
no_license
|
jonathansick-shadow/meas_extensions_simpleShape
|
3ac60daef4f5f878cd13bb341630af1ea3d2934f
|
556670ae1ca6d4c6fbbef52c4837fafb245c00c3
|
refs/heads/master
| 2020-04-05T23:09:11.347445 | 2014-06-20T22:43:32 | 2014-06-20T22:43:32 | 54,810,102 | 0 | 0 | null | 2016-03-27T03:00:37 | 2016-03-27T03:00:36 | null |
UTF-8
|
Python
| false | false | 214 |
py
|
from .simpleShapeLib import *
from .version import * # generated by sconsUtils
from lsst.meas.algorithms.algorithmRegistry import AlgorithmRegistry
AlgorithmRegistry.register("shape.simple", SimpleShapeControl)
|
[
"[email protected]"
] | |
b2f419d19da9a9fcdc4e997e3782e947b7dfb813
|
46fb9eea28f92c637273b2065cb8c38abe5a2007
|
/tbjh/jupyterhub_config.py
|
746ca8fb6be519a7ce3ad49cc49e9b5d278a5c76
|
[
"BSD-3-Clause"
] |
permissive
|
yuvipanda/the-batchiest-jupyterhub
|
0337838e2f094bcee21c6a8a0a1ed261c4e3970d
|
c3d34f55f844f307850e879ce8e464e7f367d1e3
|
refs/heads/master
| 2022-10-02T20:23:07.252736 | 2020-06-04T13:04:21 | 2020-06-04T13:04:21 | 264,124,457 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,237 |
py
|
import json
import pwd
import os
import pathlib
import asyncio
import subprocess
from glob import glob
from jupyterhub_traefik_proxy import TraefikTomlProxy
from tbjh import constants
# Don't kill servers when JupyterHub restarts
c.JupyterHub.cleanup_servers = False
# Traefik should be started by systemd
c.JupyterHub.proxy_class = TraefikTomlProxy
c.TraefikTomlProxy.should_start = False
with open(constants.TRAEFIK_CREDS_PATH) as f:
creds = json.load(f)
if 'version' not in creds or creds['version'] != 'v1':
# FIXME: Better error message
raise ValueError("Invalid traefik-creds.json file")
c.TraefikTomlProxy.traefik_api_username = creds['username']
c.TraefikTomlProxy.traefik_api_password = creds['password']
async def check_call_process(cmd):
"""
Asynchronously execute a process, throw an error when it fails
"""
proc = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await proc.communicate()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
stderr=stderr,
output=stdout
)
# Make sure there's a conda install
async def pre_spawn_hook(spawner):
username = spawner.user.name
homedir = pathlib.Path(pwd.getpwnam(username).pw_dir)
if (homedir / 'conda').exists():
# If 'conda' dir exists, assume we are good
# In the future, we might have more sophisticated checks
return
# Install miniforge
# FIXME: Show this as progress in spawn call
await check_call_process([
'/bin/sh',
str(constants.MINIFORGE_INSTALLER_PATH),
'-b', '-p', str(homedir / 'conda'),
])
# Install packages we want
await check_call_process([
str(homedir / 'conda/bin/conda'),
'env', 'create',
'-f', str(constants.NOTEBOOK_ENVIRONMENT_YML)
])
c.Spawner.pre_spawn_hook = pre_spawn_hook
# Load arbitrary .py config files if they exist.
# This is our escape hatch
extra_configs = sorted(glob(os.path.join(constants.JUPYTERHUB_CONFIG_D_DIR, '*.py')))
for ec in extra_configs:
load_subconfig(ec)
|
[
"[email protected]"
] | |
f2149e2231b00c5ed68eeabea58a9727811fe6b8
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/os_stat.py
|
2ac10452705a12d3368f8ecb6e8cc6cfde8f5ca3
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293 | 2023-07-26T13:21:11 | 2023-07-26T13:21:11 | 98,900,570 | 253 | 77 |
MIT
| 2020-10-25T01:12:53 | 2017-07-31T14:54:47 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,890 |
py
|
import os
import pathlib
import datetime
import time
import platform
p = pathlib.Path('data/temp/test.txt')
p.write_text('test')
time.sleep(10)
p.write_text('update')
# 6
print(p.stat())
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(p.stat()))
# <class 'os.stat_result'>
print(os.stat('data/temp/test.txt'))
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(os.stat('data/temp/test.txt')))
# <class 'os.stat_result'>
print(os.stat(p))
# os.stat_result(st_mode=33188, st_ino=8728494137, st_dev=16777220, st_nlink=1, st_uid=501, st_gid=20, st_size=6, st_atime=1549094615, st_mtime=1549094615, st_ctime=1549094615)
print(type(os.stat(p)))
# <class 'os.stat_result'>
print(p.stat() == os.stat('data/temp/test.txt') == os.stat(p))
# True
st = p.stat()
print(st.st_atime)
# 1549094615.972488
print(st.st_mtime)
# 1549094615.9723485
print(st.st_ctime)
# 1549094615.9723485
print(st.st_birthtime)
# 1549094605.9650702
print(type(st.st_ctime))
# <class 'float'>
print(st.st_ctime_ns)
# 1549094615972348510
print(type(st.st_ctime_ns))
# <class 'int'>
print(os.path.getatime('data/temp/test.txt'))
# 1549094615.972488
print(os.path.getmtime('data/temp/test.txt'))
# 1549094615.9723485
print(os.path.getctime('data/temp/test.txt'))
# 1549094615.9723485
print(os.path.getctime(p))
# 1549094615.9723485
print(os.path.getctime(p) == p.stat().st_ctime)
# True
dt = datetime.datetime.fromtimestamp(p.stat().st_ctime)
print(dt)
# 2019-02-02 17:03:35.972348
print(type(dt))
# <class 'datetime.datetime'>
print(dt.strftime('%Y年%m月%d日 %H:%M:%S'))
# 2019年02月02日 17:03:35
print(dt.isoformat())
# 2019-02-02T17:03:35.972348
print(os.path.getmtime('data/temp/test.txt'))
# 1549094615.9723485
print(p.stat().st_mtime)
# 1549094615.9723485
print(datetime.datetime.fromtimestamp(p.stat().st_mtime))
# 2019-02-02 17:03:35.972348
def creation_date(path_to_file):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
print(creation_date(p))
# 1549094605.9650702
print(datetime.datetime.fromtimestamp(creation_date(p)))
# 2019-02-02 17:03:25.965070
|
[
"[email protected]"
] | |
858748ae013e2904d796045042e9433ef8f91d9c
|
b47a619f6ccd0f76ccce989e62d0c963a1c14ab4
|
/Python/String general/Remove All Adjacent Duplicates In String.py
|
f29cbb7022b54f28f8b3c440b9a95d36bd92b889
|
[] |
no_license
|
GreatTwang/lccc_solution
|
0799d19097549ef3c9beeebf6dc9960db9f9eb54
|
e75899634f45b0d60f8b3cb854ab9e503d676a57
|
refs/heads/master
| 2020-07-07T02:45:18.984502 | 2019-10-09T04:53:35 | 2019-10-09T04:53:35 | 203,219,848 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
# Time O(N), space O(N)
# If c equals the top of the stack, pop it; otherwise push c.
class Solution:
    def removeDuplicates(self, S: str) -> str:
        stack = []
        for c in S:
            if stack and c == stack[-1]:
                stack.pop()
            else:
                stack.append(c)
        return ''.join(stack)
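# Illustrative check (added): Solution().removeDuplicates("abbaca") returns
# "ca": "bb" cancels first, then the newly adjacent "aa" cancels.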
|
[
"[email protected]"
] | |
c8b5bad84514b74417ff2eb13f76d4404db322ca
|
fe096ed06c34ae3adf958760886dd5f2fc64fa90
|
/Heap (Priority Queue)/kClosestPointsToOrigin.py
|
8b49044cf7e3c6b3a888bdad5e1abcf7473f227d
|
[] |
no_license
|
harshmalviya7/LeetCode_Coding_Questions
|
c9d8a93f4a5664dcf57098cd58f3f1d95667b0c0
|
47edb51e55e390861ed539972d8bf66b41b4cdd7
|
refs/heads/master
| 2023-08-23T01:09:40.110710 | 2021-10-21T12:53:36 | 2021-10-21T12:53:36 | 373,072,675 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 543 |
py
|
# 973. K Closest Points to Origin
# https://leetcode.com/problems/k-closest-points-to-origin/
import heapq
from typing import List


class Solution:
    def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
        # Max-heap of size k: negate the squared distance so the farthest
        # point sits at the top and is evicted first.
        e = []
        for point in points:
            heapq.heappush(e, (-(point[0] * point[0] + point[1] * point[1]), point))
            if len(e) > k:
                heapq.heappop(e)
        return [point for _, point in e]
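# Illustrative check (added): Solution().kClosest([[1, 3], [-2, 2]], 1)
# returns [[-2, 2]], since (-2)**2 + 2**2 = 8 < 1**2 + 3**2 = 10.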
|
[
"[email protected]"
] | |
834321d61bdd025df9f0b9f1bf249d10cdfcb5b4
|
c28ac3e0dd887e25d40e019dde062e73cb4a433c
|
/scripts/TargetTaxaGenes/TargetTaxaGenes.py
|
4c96216dec48f4715b883e77e28fb2b29d58b027
|
[] |
no_license
|
ZhikunWu/Bioinformatic-resources
|
16abc66f19d95dd14c11b2a453f7b3df0ed1fa16
|
2695dd0e249a49b948ac56cd71574b84c24cbf8a
|
refs/heads/master
| 2021-06-01T22:29:40.731595 | 2020-10-10T01:41:07 | 2020-10-10T01:41:07 | 134,114,964 | 10 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,679 |
py
|
#!/usr/bin/env python
import collections
import argparse
#usage: python TargetTaxaGenes.py --gene /home/wzk/Project/C128/NCyc/representive.faa.annotation.xls --taxonomy /home/wzk/Project/C128/NR/representive.faa.diamond_taxonomy_species.txt --out representive.faa.diamond_taxonomy_species_NCyc.txt
def get_NCyc_gene(gene_file):
Genes = {}
in_h = open(gene_file, "r")
header = in_h.readline()
for line in in_h:
lines = line.strip().split("\t")
gene = lines[0]
target = lines[1]
Genes[gene] = target
in_h.close()
return Genes
def taxonomy_gene(gene_file, taxonomy_file, out_file):
Genes = get_NCyc_gene(gene_file)
TaxaGenes = collections.defaultdict(set)
in_h = open(taxonomy_file, "r")
for line in in_h:
lines = line.strip().split("\t")
gene = lines[0]
taxa = lines[-1]
if gene in Genes:
target = Genes[gene]
TaxaGenes[taxa].add(target)
in_h.close()
out_h = open(out_file, "w")
for t in TaxaGenes:
genes = TaxaGenes[t]
sortGenes = sorted(list(genes))
out_h.write("%s\t%s\n" % (t, "|".join(sortGenes)))
out_h.close()
def main():
parser = argparse.ArgumentParser(description="Get the genes of the taxonomy.")
parser.add_argument("-g", "--gene", help="The file contain genes.")
parser.add_argument("-t", "--taxonomy", help="The file contain gene and taxonomy.")
parser.add_argument("-o","--out", help="The output file.")
args = parser.parse_args()
taxonomy_gene(args.gene, args.taxonomy, args.out)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
d581043ac8d2272090dc353c9017bcb459065be1
|
b99b32fb0b4597bee94809ebd3b2ddae43064bee
|
/landmark_detection/menpofit/error/human/face.py
|
5aa7f554748c565e7cf57e1e939a3b1c9827b545
|
[] |
no_license
|
HongwenZhang/ECT-FaceAlignment
|
c0129dc2aa20bc2bdba03a9ed1cabebcd5e5d848
|
e94b446db73fca5ba751d6d9a81d42633208f228
|
refs/heads/master
| 2023-01-29T14:25:19.502350 | 2020-12-13T09:18:55 | 2020-12-13T09:18:55 | 111,511,579 | 31 | 19 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,076 |
py
|
import numpy as np
from menpo.shape import PointCloud
from menpo.landmark import (face_ibug_68_to_face_ibug_49,
face_ibug_68_to_face_ibug_68,
face_ibug_49_to_face_ibug_49)
from menpofit.error import euclidean_error
from menpofit.error.base import (distance_normalised_error,
distance_indexed_normalised_error,
bb_normalised_error)
def _convert_68_to_51(shape):
return PointCloud(shape.points[17:])
def _convert_68_to_49(shape):
sp = shape.points.copy()
sp = np.delete(sp, 64, 0)
sp = np.delete(sp, 60, 0)
sp = sp[17:]
return PointCloud(sp)
def _convert_66_to_49(shape):
return PointCloud(shape.points[17:])
def _convert_51_to_49(shape):
sp = shape.points.copy()
sp = np.delete(sp, 47, 0)
sp = np.delete(sp, 43, 0)
return PointCloud(sp)
def mean_pupil_68_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 68 points normalised with the
distance between the mean eye points (pupils), i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{68}\sum_{i=1}^{68}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s)` is the distance between the
mean eye points (pupils).
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
must have 68 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 points.
Returns
-------
normalised_error : `float`
The computed normalised Euclidean error.
Raises
------
ValueError
Final shape must have 68 points
ValueError
Ground truth shape must have 68 points
"""
if shape.n_points != 68:
raise ValueError('Final shape must have 68 points')
if gt_shape.n_points != 68:
raise ValueError('Ground truth shape must have 68 points')
def pupil_dist(_, s):
_, mapping = face_ibug_68_to_face_ibug_68(s, include_mapping=True)
return euclidean_error(np.mean(s[mapping['left_eye']], axis=0),
np.mean(s[mapping['right_eye']], axis=0))
return distance_normalised_error(euclidean_error, pupil_dist, shape,
gt_shape)
def mean_pupil_49_error(shape, gt_shape):
r"""
    Computes the Euclidean error based on 49 points normalised with the
distance between the mean eye points (pupils), i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{49}\sum_{i=1}^{49}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s)` is the distance between the
mean eye points (pupils).
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
must have either 68 or 66 or 51 or 49 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have either 68 or 66 or 51 or 49 points.
Returns
-------
normalised_error : `float`
The computed normalised Euclidean error.
Raises
------
ValueError
Final shape must have 68 or 66 or 51 or 49 points
ValueError
Ground truth shape must have 68 or 66 or 51 or 49 points
"""
if shape.n_points not in [68, 66, 51, 49]:
raise ValueError('Final shape must have 68 or 66 or 51 or 49 points')
if gt_shape.n_points not in [68, 66, 51, 49]:
raise ValueError('Ground truth shape must have 68 or 66 or 51 or 49 '
'points')
def pupil_dist(_, s):
_, mapping = face_ibug_49_to_face_ibug_49(s, include_mapping=True)
return euclidean_error(np.mean(s[mapping['left_eye']], axis=0),
np.mean(s[mapping['right_eye']], axis=0))
if shape.n_points == 68:
shape = _convert_68_to_49(shape)
elif shape.n_points == 66:
shape = _convert_66_to_49(shape)
elif shape.n_points == 51:
shape = _convert_51_to_49(shape)
if gt_shape.n_points == 68:
gt_shape = _convert_68_to_49(gt_shape)
elif gt_shape.n_points == 66:
gt_shape = _convert_66_to_49(gt_shape)
elif gt_shape.n_points == 51:
gt_shape = _convert_51_to_49(gt_shape)
return distance_normalised_error(euclidean_error, pupil_dist, shape,
gt_shape)
def outer_eye_corner_68_euclidean_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 68 points normalised with the
    distance between the outer eye corners, i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{68}\sum_{i=1}^{68}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s^*)` is the distance between the
``36``-th and ``45``-th points.
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
must have 68 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 points.
Returns
-------
normalised_error : `float`
The computed normalised Euclidean error.
Raises
------
ValueError
Final shape must have 68 points
ValueError
Ground truth shape must have 68 points
"""
if shape.n_points != 68:
raise ValueError('Final shape must have 68 points')
if gt_shape.n_points != 68:
raise ValueError('Ground truth shape must have 68 points')
return distance_indexed_normalised_error(euclidean_error, 36, 45, shape,
gt_shape)
def outer_eye_corner_51_euclidean_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 51 points normalised with the
    distance between the outer eye corners, i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{51}\sum_{i=1}^{51}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s^*)` is the distance between the
``19``-th and ``28``-th points.
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
        must have 68 or 51 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 or 51 points.
Returns
-------
normalised_error : `float`
The computed normalised Euclidean error.
Raises
------
ValueError
Final shape must have 68 or 51 points
ValueError
Ground truth shape must have 68 or 51 points
"""
if shape.n_points not in [68, 51]:
raise ValueError('Final shape must have 68 or 51 points')
if gt_shape.n_points not in [68, 51]:
raise ValueError('Ground truth shape must have 68 or 51 points')
if shape.n_points == 68:
shape = _convert_68_to_51(shape)
if gt_shape.n_points == 68:
gt_shape = _convert_68_to_51(gt_shape)
return distance_indexed_normalised_error(euclidean_error, 19, 28, shape,
gt_shape)
def outer_eye_corner_49_euclidean_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 49 points normalised with the
    distance between the outer eye corners, i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{49}\sum_{i=1}^{49}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s^*)` is the distance between the
``19``-th and ``28``-th points.
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
        must have 68 or 66 or 51 or 49 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 or 66 or 51 or 49 points.
Returns
-------
normalised_error : `float`
The computed normalised Euclidean error.
Raises
------
ValueError
Final shape must have 68 or 66 or 51 or 49 points
ValueError
Ground truth shape must have 68 or 66 or 51 or 49 points
"""
if shape.n_points not in [68, 66, 51, 49]:
raise ValueError('Final shape must have 68 or 66 or 51 or 49 points')
if gt_shape.n_points not in [68, 66, 51, 49]:
raise ValueError('Ground truth shape must have 68 or 66 or 51 or 49 '
'points')
if shape.n_points == 68:
shape = _convert_68_to_49(shape)
elif shape.n_points == 66:
shape = _convert_66_to_49(shape)
elif shape.n_points == 51:
shape = _convert_51_to_49(shape)
if gt_shape.n_points == 68:
gt_shape = _convert_68_to_49(gt_shape)
elif gt_shape.n_points == 66:
gt_shape = _convert_66_to_49(gt_shape)
elif gt_shape.n_points == 51:
gt_shape = _convert_51_to_49(gt_shape)
return distance_indexed_normalised_error(euclidean_error, 19, 28, shape,
gt_shape)
def bb_avg_edge_length_68_euclidean_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 68 points normalised by the average
edge length of the 68-point ground truth shape's bounding box, i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{68}\sum_{i=1}^{68}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s^*)` is a normalising function
that returns the average edge length of the bounding box of the 68-point
ground truth shape (:map:`bb_avg_edge_length`).
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
must have 68 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 points.
Returns
-------
normalised_error : `float`
The computed Euclidean normalised error.
Raises
------
ValueError
Final shape must have 68 points
ValueError
Ground truth shape must have 68 points
"""
if shape.n_points != 68:
raise ValueError('Final shape must have 68 points')
if gt_shape.n_points != 68:
raise ValueError('Ground truth shape must have 68 points')
return bb_normalised_error(euclidean_error, shape, gt_shape,
norm_type='avg_edge_length', norm_shape=gt_shape)
def bb_avg_edge_length_49_euclidean_error(shape, gt_shape):
r"""
Computes the Euclidean error based on 49 points normalised by the average
edge length of the 68-point ground truth shape's bounding box, i.e.
.. math::
\frac{\mathcal{F}(s,s^*)}{\mathcal{N}(s^*)}
where
.. math::
\mathcal{F}(s,s^*) = \frac{1}{49}\sum_{i=1}^{49}\sqrt{(s_{i,x}-s^*_{i,x})^2 + (s_{i,y}-s^*_{i,y})^2}
where :math:`s` and :math:`s^*` are the final and ground truth shapes,
respectively. :math:`(s_{i,x}, s_{i,y})` are the `x` and `y` coordinates of
the :math:`i`'th point of the final shape, :math:`(s^*_{i,x}, s^*_{i,y})`
are the `x` and `y` coordinates of the :math:`i`'th point of the ground
truth shape. Finally, :math:`\mathcal{N}(s^*)` is a normalising function
that returns the average edge length of the bounding box of the 68-point
ground truth shape (:map:`bb_avg_edge_length`).
Parameters
----------
shape : `menpo.shape.PointCloud`
The input shape (e.g. the final shape of a fitting procedure). It
must have 68 or 66 or 51 or 49 points.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape. It must have 68 points.
Returns
-------
normalised_error : `float`
The computed Euclidean normalised error.
Raises
------
ValueError
        Final shape must have 68 or 66 or 51 or 49 points
ValueError
Ground truth shape must have 68 points
"""
if shape.n_points not in [68, 66, 51, 49]:
raise ValueError('Final shape must have 68 or 66 or 51 or 49 points')
if gt_shape.n_points != 68:
raise ValueError('Ground truth shape must have 68 points')
if shape.n_points == 68:
shape = _convert_68_to_49(shape)
elif shape.n_points == 66:
shape = _convert_66_to_49(shape)
elif shape.n_points == 51:
shape = _convert_51_to_49(shape)
gt_shape_68 = gt_shape.copy()
gt_shape = _convert_68_to_49(gt_shape)
return bb_normalised_error(euclidean_error, shape, gt_shape,
norm_type='avg_edge_length',
norm_shape=gt_shape_68)
|
[
"[email protected]"
] | |
39b8024b1f674cfea0f2b7a50c585c89ddc5546a
|
13a70bdc3ac997d0d6c839fe633deed3ca7fc5ab
|
/ch07-Linear_regrs/Ridge_regre.py
|
bbec67ae57bb14c87fff6654ab05b19258ddceb5
|
[] |
no_license
|
Y1ran/Machine-Learning-in-Action-Python3
|
5546a777d78aee6445da1621b2deaddb099ae6ef
|
5aca5f9b865be449793e50ce32cba7c9b1ef286b
|
refs/heads/master
| 2022-10-09T06:30:08.122526 | 2022-09-30T10:04:53 | 2022-09-30T10:04:53 | 139,533,418 | 403 | 233 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 15:04:10 2018
@author: Administrator
"""
from numpy import *


def ridgeRegres(xMat, yMat, lam=0.2):
    '''
    Ridge regression
    @xMat: sample data
    @yMat: original target values for the samples
    @lam: penalty coefficient lambda, default 0.2
    '''
    # compute the matrix inner product
    xTx = xMat.T * xMat
    # add the penalty term so that xTx becomes invertible
    denom = xTx + eye(shape(xMat)[1]) * lam
    # check whether the determinant is 0 to decide invertibility
    if linalg.det(denom) == 0.0:
        print('This matrix is singular, cannot do inverse')
        return
    # compute the regression coefficients
    ws = denom.I * (xMat.T * yMat)
    return ws


# features must be standardised so that all of them carry equal weight
def ridgeTest(xArr, yArr):
    xMat = mat(xArr); yMat = mat(yArr).T
    # compute the mean
    yMean = mean(yMat, 0)
    yMat = yMat - yMean
    xMeans = mean(xMat, 0)
    # compute the variance of each feature
    xVar = var(xMat, 0)
    # (feature - mean) / variance
    xMat = (xMat - xMeans) / xVar
    # test under 30 different lambda values
    numTestpts = 30
    # the 30 results are stored in wMat
    wMat = zeros((numTestpts, shape(xMat)[1]))
    for i in range(numTestpts):
        # compute the regression coefficients for each lambda;
        # lambda varies exponentially
        ws = ridgeRegres(xMat, yMat, exp(i - 10))
        wMat[i, :] = ws.T
    return wMat
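# A minimal usage sketch (added for illustration; the synthetic data below is
# hypothetical, not from the original script):
if __name__ == '__main__':
    random.seed(0)
    xArr = random.randn(100, 2).tolist()            # 100 samples, 2 features
    yArr = [3.0 * x[0] + 1.5 * x[1] for x in xArr]  # linear target
    wMat = ridgeTest(xArr, yArr)
    print(wMat.shape)  # (30, 2): one coefficient row per lambda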
|
[
"[email protected]"
] | |
41e006767298bb8cdc28e9c8887b69e6fe238271
|
d11f077623502b9ff682ddb38effba388b248f0a
|
/sdk/search/azure-search-documents/azure/search/documents/_search_client.py
|
79fce398aad8b53f89e2056f08cf3ebae8346a29
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
digimaun/azure-sdk-for-python
|
a840d2e8a125e0e9ed966ab78e28528186dafa3e
|
9f91027314715209d7a483d8a439651c38b71272
|
refs/heads/main
| 2023-08-25T12:35:35.687748 | 2021-10-19T00:10:05 | 2021-10-19T00:10:05 | 418,723,546 | 0 | 0 |
MIT
| 2021-10-19T01:12:10 | 2021-10-19T01:12:09 | null |
UTF-8
|
Python
| false | false | 31,804 |
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING
import six
from azure.core.credentials import AzureKeyCredential
from azure.core.tracing.decorator import distributed_trace
from ._api_versions import DEFAULT_VERSION
from ._generated import SearchClient as SearchIndexClient
from ._generated.models import IndexingResult
from ._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch import IndexDocumentsBatch
from ._paging import SearchItemPaged, SearchPageIterator
from ._queries import AutocompleteQuery, SearchQuery, SuggestQuery
from ._headers_mixin import HeadersMixin
from ._utils import get_authentication_policy
from ._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Union
from azure.core.credentials import TokenCredential
def odata(statement, **kwargs):
"""Escape an OData query string.
The statement to prepare should include fields to substitute given inside
braces, e.g. `{somevar}` and then pass the corresponding value as a keyword
argument, e.g. `somevar=10`.
:param statement: An OData query string to prepare
:type statement: str
:rtype: str
.. admonition:: Example:
>>> odata("name eq {name} and age eq {age}", name="O'Neil", age=37)
"name eq 'O''Neil' and age eq 37"
"""
kw = dict(kwargs)
for key in kw:
value = kw[key]
if isinstance(value, six.string_types):
value = value.replace("'", "''")
if "'{{{}}}'".format(key) not in statement:
kw[key] = "'{}'".format(value)
return statement.format(**kw)
class SearchClient(HeadersMixin):
"""A client to interact with an existing Azure search index.
:param endpoint: The URL endpoint of an Azure search service
:type endpoint: str
:param index_name: The name of the index to connect to
:type index_name: str
:param credential: A credential to authorize search client requests
:type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential
:keyword str api_version: The Search API version to use for requests.
.. admonition:: Example:
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_search_client_with_key]
:end-before: [END create_search_client_with_key]
:language: python
:dedent: 4
:caption: Creating the SearchClient with an API key.
"""
_ODATA_ACCEPT = "application/json;odata.metadata=none" # type: str
def __init__(self, endpoint, index_name, credential, **kwargs):
# type: (str, str, Union[AzureKeyCredential, TokenCredential], **Any) -> None
self._api_version = kwargs.pop("api_version", DEFAULT_VERSION)
self._endpoint = endpoint # type: str
self._index_name = index_name # type: str
self._credential = credential
if isinstance(credential, AzureKeyCredential):
self._aad = False
self._client = SearchIndexClient(
endpoint=endpoint,
index_name=index_name,
sdk_moniker=SDK_MONIKER,
api_version=self._api_version,
**kwargs
) # type: SearchIndexClient
else:
self._aad = True
authentication_policy = get_authentication_policy(credential)
self._client = SearchIndexClient(
endpoint=endpoint,
index_name=index_name,
authentication_policy=authentication_policy,
sdk_moniker=SDK_MONIKER,
api_version=self._api_version,
**kwargs
) # type: SearchIndexClient
def __repr__(self):
# type: () -> str
return "<SearchClient [endpoint={}, index={}]>".format(
repr(self._endpoint), repr(self._index_name)
)[:1024]
def close(self):
# type: () -> None
"""Close the :class:`~azure.search.documents.SearchClient` session."""
return self._client.close()
@distributed_trace
def get_document_count(self, **kwargs):
# type: (**Any) -> int
"""Return the number of documents in the Azure search index.
:rtype: int
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
return int(self._client.documents.count(**kwargs))
@distributed_trace
def get_document(self, key, selected_fields=None, **kwargs):
# type: (str, List[str], **Any) -> dict
"""Retrieve a document from the Azure search index by its key.
:param key: The primary key value for the document to retrieve
:type key: str
        :param selected_fields: an allowlist of fields to include in the results
:type selected_fields: List[str]
:rtype: dict
.. admonition:: Example:
.. literalinclude:: ../samples/sample_get_document.py
:start-after: [START get_document]
:end-before: [END get_document]
:language: python
:dedent: 4
:caption: Get a specific document from the search index.
"""
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
result = self._client.documents.get(
key=key, selected_fields=selected_fields, **kwargs
)
return cast(dict, result)
@distributed_trace
def search(self, search_text, **kwargs): # pylint:disable=too-many-locals
# type: (str, **Any) -> SearchItemPaged[dict]
"""Search the Azure search index for documents.
:param str search_text: A full-text search query expression; Use "*" or omit this parameter to
match all documents.
:keyword bool include_total_count: A value that specifies whether to fetch the total count of
results. Default is false. Setting this value to true may have a performance impact. Note that
the count returned is an approximation.
:keyword list[str] facets: The list of facet expressions to apply to the search query. Each facet
expression contains a field name, optionally followed by a comma-separated list of name:value
pairs.
:keyword str filter: The OData $filter expression to apply to the search query.
:keyword str highlight_fields: The comma-separated list of field names to use for hit highlights.
Only searchable fields can be used for hit highlighting.
:keyword str highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. Default is </em>.
:keyword str highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. Default is <em>.
:keyword float minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
must be covered by a search query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 100.
:keyword list[str] order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, and
desc to indicate descending. The default is ascending order. Ties will be broken by the match
scores of documents. If no OrderBy is specified, the default sort order is descending by
document match score. There can be at most 32 $orderby clauses.
:keyword query_type: A value that specifies the syntax of the search query. The default is
'simple'. Use 'full' if your query uses the Lucene query syntax. Possible values include:
'simple', 'full', "semantic".
:paramtype query_type: str or ~azure.search.documents.models.QueryType
:keyword list[str] scoring_parameters: The list of parameter values to be used in scoring functions (for
example, referencePointParameter) using the format name-values. For example, if the scoring
profile defines a function with a parameter called 'mylocation' the parameter string would be
"mylocation--122.2,44.8" (without the quotes).
:keyword str scoring_profile: The name of a scoring profile to evaluate match scores for matching
documents in order to sort the results.
:keyword list[str] search_fields: The list of field names to which to scope the full-text search. When
using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of
each fielded search expression take precedence over any field names listed in this parameter.
:keyword search_mode: A value that specifies whether any or all of the search terms must be
matched in order to count the document as a match. Possible values include: 'any', 'all'.
:paramtype search_mode: str or ~azure.search.documents.models.SearchMode
:keyword query_language: A value that specifies the language of the search query. Possible values
include: "none", "en-us".
:paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
:keyword query_speller: A value that specified the type of the speller to use to spell-correct
individual search query terms. Possible values include: "none", "lexicon".
:paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType
:keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set,
the query returns answers extracted from key passages in the highest ranked documents.
Possible values include: "none", "extractive".
:paramtype query_answer: str or ~azure.search.documents.models.QueryAnswerType
:keyword int query_answer_count: This parameter is only valid if the query type is 'semantic' and
query answer is 'extractive'. Configures the number of answers returned. Default count is 1.
:keyword query_caption: This parameter is only valid if the query type is 'semantic'. If set, the
query returns captions extracted from key passages in the highest ranked documents.
Defaults to 'None'. Possible values include: "none", "extractive".
:paramtype query_caption: str or ~azure.search.documents.models.QueryCaptionType
:keyword bool query_caption_highlight: This parameter is only valid if the query type is 'semantic' when
query caption is set to 'extractive'. Determines whether highlighting is enabled.
Defaults to 'true'.
:keyword list[str] semantic_fields: The list of field names used for semantic search.
:keyword list[str] select: The list of fields to retrieve. If unspecified, all fields marked as retrievable
in the schema are included.
:keyword int skip: The number of search results to skip. This value cannot be greater than 100,000.
If you need to scan documents in sequence, but cannot use $skip due to this limitation,
consider using $orderby on a totally-ordered key and $filter with a range query instead.
:keyword int top: The number of search results to retrieve. This can be used in conjunction with
$skip to implement client-side paging of search results. If results are truncated due to
server-side paging, the response will include a continuation token that can be used to issue
another Search request for the next page of results.
:rtype: SearchItemPaged[dict]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_simple_query.py
:start-after: [START simple_query]
:end-before: [END simple_query]
:language: python
:dedent: 4
:caption: Search on a simple text term.
.. admonition:: Example:
.. literalinclude:: ../samples/sample_filter_query.py
:start-after: [START filter_query]
:end-before: [END filter_query]
:language: python
:dedent: 4
:caption: Filter and sort search results.
.. admonition:: Example:
.. literalinclude:: ../samples/sample_facet_query.py
:start-after: [START facet_query]
:end-before: [END facet_query]
:language: python
:dedent: 4
:caption: Get search result facets.
"""
include_total_result_count = kwargs.pop("include_total_count", None)
facets = kwargs.pop("facets", None)
filter_arg = kwargs.pop("filter", None)
highlight_fields = kwargs.pop("highlight_fields", None)
highlight_post_tag = kwargs.pop("highlight_post_tag", None)
highlight_pre_tag = kwargs.pop("highlight_pre_tag", None)
minimum_coverage = kwargs.pop("minimum_coverage", None)
order_by = kwargs.pop("order_by", None)
query_type = kwargs.pop("query_type", None)
scoring_parameters = kwargs.pop("scoring_parameters", None)
scoring_profile = kwargs.pop("scoring_profile", None)
search_fields = kwargs.pop("search_fields", None)
search_fields_str = ",".join(search_fields) if search_fields else None
search_mode = kwargs.pop("search_mode", None)
query_language = kwargs.pop("query_language", None)
query_speller = kwargs.pop("query_speller", None)
select = kwargs.pop("select", None)
skip = kwargs.pop("skip", None)
top = kwargs.pop("top", None)
query_answer = kwargs.pop("query_answer", None)
query_answer_count = kwargs.pop("query_answer_count", None)
answers = query_answer if not query_answer_count else '{}|count-{}'.format(
query_answer, query_answer_count
)
query_caption = kwargs.pop("query_caption", None)
query_caption_highlight = kwargs.pop("query_caption_highlight", None)
captions = query_caption if not query_caption_highlight else '{}|highlight-{}'.format(
query_caption, query_caption_highlight
)
semantic_fields = kwargs.pop("semantic_fields", None)
query = SearchQuery(
search_text=search_text,
include_total_result_count=include_total_result_count,
facets=facets,
filter=filter_arg,
highlight_fields=highlight_fields,
highlight_post_tag=highlight_post_tag,
highlight_pre_tag=highlight_pre_tag,
minimum_coverage=minimum_coverage,
order_by=order_by,
query_type=query_type,
scoring_parameters=scoring_parameters,
scoring_profile=scoring_profile,
search_fields=search_fields_str,
search_mode=search_mode,
query_language=query_language,
speller=query_speller,
answers=answers,
captions=captions,
semantic_fields=",".join(semantic_fields) if semantic_fields else None,
select=select if isinstance(select, six.string_types) else None,
skip=skip,
top=top,
)
if isinstance(select, list):
query.select(select)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
kwargs["api_version"] = self._api_version
return SearchItemPaged(
self._client, query, kwargs, page_iterator_class=SearchPageIterator
)
@distributed_trace
def suggest(self, search_text, suggester_name, **kwargs):
# type: (str, str, **Any) -> List[dict]
"""Get search suggestion results from the Azure search index.
:param str search_text: Required. The search text to use to suggest documents. Must be at least 1
character, and no more than 100 characters.
:param str suggester_name: Required. The name of the suggester as specified in the suggesters
collection that's part of the index definition.
:keyword str filter: An OData expression that filters the documents considered for suggestions.
:keyword bool use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions
query. Default is false. When set to true, the query will find terms even if there's a
substituted or missing character in the search text. While this provides a better experience in
some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and
consume more resources.
:keyword str highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
:keyword str highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
:keyword float minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
must be covered by a suggestions query in order for the query to be reported as a success. This
parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
:keyword list[str] order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
to indicate descending. The default is ascending order. Ties will be broken by the match scores
of documents. If no $orderby is specified, the default sort order is descending by document
match score. There can be at most 32 $orderby clauses.
:keyword list[str] search_fields: The list of field names to search for the specified search text. Target
fields must be included in the specified suggester.
:keyword list[str] select: The list of fields to retrieve. If unspecified, only the key field will be
included in the results.
:keyword int top: The number of suggestions to retrieve. The value must be a number between 1 and
100. The default is 5.
:rtype: List[dict]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_suggestions.py
:start-after: [START suggest_query]
:end-before: [END suggest_query]
:language: python
:dedent: 4
:caption: Get search suggestions.
"""
filter_arg = kwargs.pop("filter", None)
use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None)
highlight_post_tag = kwargs.pop("highlight_post_tag", None)
highlight_pre_tag = kwargs.pop("highlight_pre_tag", None)
minimum_coverage = kwargs.pop("minimum_coverage", None)
order_by = kwargs.pop("order_by", None)
search_fields = kwargs.pop("search_fields", None)
search_fields_str = ",".join(search_fields) if search_fields else None
select = kwargs.pop("select", None)
top = kwargs.pop("top", None)
query = SuggestQuery(
search_text=search_text,
suggester_name=suggester_name,
filter=filter_arg,
use_fuzzy_matching=use_fuzzy_matching,
highlight_post_tag=highlight_post_tag,
highlight_pre_tag=highlight_pre_tag,
minimum_coverage=minimum_coverage,
order_by=order_by,
search_fields=search_fields_str,
select=select if isinstance(select, six.string_types) else None,
top=top,
)
if isinstance(select, list):
query.select(select)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
response = self._client.documents.suggest_post(
suggest_request=query.request, **kwargs
)
results = [r.as_dict() for r in response.results]
return results
@distributed_trace
def autocomplete(self, search_text, suggester_name, **kwargs):
# type: (str, str, **Any) -> List[dict]
"""Get search auto-completion results from the Azure search index.
:param str search_text: The search text on which to base autocomplete results.
:param str suggester_name: The name of the suggester as specified in the suggesters
collection that's part of the index definition.
:keyword mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use
'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing
auto-completed terms. Possible values include: 'oneTerm', 'twoTerms', 'oneTermWithContext'.
:paramtype mode: str or ~azure.search.documents.models.AutocompleteMode
:keyword str filter: An OData expression that filters the documents used to produce completed terms
for the Autocomplete result.
:keyword bool use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
autocomplete query. Default is false. When set to true, the query will find terms even if
there's a substituted or missing character in the search text. While this provides a better
experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are
slower and consume more resources.
:keyword str highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting is disabled.
:keyword str highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting is disabled.
:keyword float minimum_coverage: A number between 0 and 100 indicating the percentage of the index that
must be covered by an autocomplete query in order for the query to be reported as a success.
This parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
:keyword list[str] search_fields: The list of field names to consider when querying for auto-completed
terms. Target fields must be included in the specified suggester.
:keyword int top: The number of auto-completed terms to retrieve. This must be a value between 1 and
100. The default is 5.
:rtype: List[dict]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_autocomplete.py
:start-after: [START autocomplete_query]
:end-before: [END autocomplete_query]
:language: python
:dedent: 4
                :caption: Get auto-completions.
"""
autocomplete_mode = kwargs.pop("mode", None)
filter_arg = kwargs.pop("filter", None)
use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None)
highlight_post_tag = kwargs.pop("highlight_post_tag", None)
highlight_pre_tag = kwargs.pop("highlight_pre_tag", None)
minimum_coverage = kwargs.pop("minimum_coverage", None)
search_fields = kwargs.pop("search_fields", None)
search_fields_str = ",".join(search_fields) if search_fields else None
top = kwargs.pop("top", None)
query = AutocompleteQuery(
search_text=search_text,
suggester_name=suggester_name,
autocomplete_mode=autocomplete_mode,
filter=filter_arg,
use_fuzzy_matching=use_fuzzy_matching,
highlight_post_tag=highlight_post_tag,
highlight_pre_tag=highlight_pre_tag,
minimum_coverage=minimum_coverage,
search_fields=search_fields_str,
top=top,
)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
response = self._client.documents.autocomplete_post(
autocomplete_request=query.request, **kwargs
)
results = [r.as_dict() for r in response.results]
return results
def upload_documents(self, documents, **kwargs):
# type: (List[dict], **Any) -> List[IndexingResult]
"""Upload documents to the Azure search index.
An upload action is similar to an "upsert" where the document will be
inserted if it is new and updated/replaced if it exists. All fields are
replaced in the update case.
:param documents: A list of documents to upload.
:type documents: List[dict]
:rtype: List[IndexingResult]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_crud_operations.py
:start-after: [START upload_document]
:end-before: [END upload_document]
:language: python
:dedent: 4
:caption: Upload new documents to an index
"""
batch = IndexDocumentsBatch()
batch.add_upload_actions(documents)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
results = self.index_documents(batch, **kwargs)
return cast(List[IndexingResult], results)
def delete_documents(self, documents, **kwargs):
# type: (List[dict], **Any) -> List[IndexingResult]
"""Delete documents from the Azure search index
Delete removes the specified document from the index. Any field you
specify in a delete operation, other than the key field, will be
ignored. If you want to remove an individual field from a document, use
`merge_documents` instead and set the field explicitly to None.
Delete operations are idempotent. That is, even if a document key does
not exist in the index, attempting a delete operation with that key will
result in a 200 status code.
:param documents: A list of documents to delete.
:type documents: List[dict]
:rtype: List[IndexingResult]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_crud_operations.py
:start-after: [START delete_document]
:end-before: [END delete_document]
:language: python
:dedent: 4
                :caption: Delete existing documents from an index
"""
batch = IndexDocumentsBatch()
batch.add_delete_actions(documents)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
results = self.index_documents(batch, **kwargs)
return cast(List[IndexingResult], results)
def merge_documents(self, documents, **kwargs):
# type: (List[dict], **Any) -> List[IndexingResult]
"""Merge documents in to existing documents in the Azure search index.
Merge updates an existing document with the specified fields. If the
document doesn't exist, the merge will fail. Any field you specify in a
merge will replace the existing field in the document. This also applies
to collections of primitive and complex types.
:param documents: A list of documents to merge.
:type documents: List[dict]
:rtype: List[IndexingResult]
.. admonition:: Example:
.. literalinclude:: ../samples/sample_crud_operations.py
:start-after: [START merge_document]
:end-before: [END merge_document]
:language: python
:dedent: 4
                :caption: Merge fields into existing documents in an index
"""
batch = IndexDocumentsBatch()
batch.add_merge_actions(documents)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
results = self.index_documents(batch, **kwargs)
return cast(List[IndexingResult], results)
def merge_or_upload_documents(self, documents, **kwargs):
# type: (List[dict], **Any) -> List[IndexingResult]
"""Merge documents in to existing documents in the Azure search index,
or upload them if they do not yet exist.
This action behaves like `merge_documents` if a document with the given
key already exists in the index. If the document does not exist, it
behaves like `upload_documents` with a new document.
:param documents: A list of documents to merge or upload.
:type documents: List[dict]
:rtype: List[IndexingResult]
"""
batch = IndexDocumentsBatch()
batch.add_merge_or_upload_actions(documents)
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
results = self.index_documents(batch, **kwargs)
return cast(List[IndexingResult], results)
@distributed_trace
def index_documents(self, batch, **kwargs):
# type: (IndexDocumentsBatch, **Any) -> List[IndexingResult]
"""Specify a document operations to perform as a batch.
:param batch: A batch of document operations to perform.
:type batch: IndexDocumentsBatch
:rtype: List[IndexingResult]
        :raises: :class:`~azure.search.documents.RequestEntityTooLargeError`
"""
return self._index_documents_actions(actions=batch.actions, **kwargs)
def _index_documents_actions(self, actions, **kwargs):
# type: (List[IndexAction], **Any) -> List[IndexingResult]
error_map = {413: RequestEntityTooLargeError}
kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
try:
batch_response = self._client.documents.index(
actions=actions, error_map=error_map, **kwargs
)
return cast(List[IndexingResult], batch_response.results)
        except RequestEntityTooLargeError:
            # The request payload was too large: split the batch in half and
            # retry each half recursively. A single action that is still too
            # large is re-raised to the caller.
            if len(actions) == 1:
                raise
            pos = round(len(actions) / 2)
            # Each recursive call already returns List[IndexingResult], so the
            # halves can be concatenated directly. error_map is not forwarded,
            # since each recursive call installs its own.
            result_first_half = self._index_documents_actions(
                actions=actions[:pos], **kwargs
            ) or []
            result_second_half = self._index_documents_actions(
                actions=actions[pos:], **kwargs
            ) or []
            return result_first_half + result_second_half
def __enter__(self):
# type: () -> SearchClient
self._client.__enter__() # pylint:disable=no-member
return self
def __exit__(self, *args):
# type: (*Any) -> None
self._client.__exit__(*args) # pylint:disable=no-member
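
# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal illustration of the client above; the endpoint, index name and
# key are placeholders supplied by the caller, and the index is assumed to
# have a key field named "hotelId".
def _example_usage(endpoint, index_name, key):  # pragma: no cover
    from azure.core.credentials import AzureKeyCredential

    client = SearchClient(endpoint, index_name, AzureKeyCredential(key))
    client.upload_documents([{"hotelId": "1", "name": "Stay Inn"}])
    for result in client.search(search_text="stay"):
        print(result["name"])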
|
[
"[email protected]"
] | |
05dc4db4abdbe277bd748d7b419322f0001a5306
|
6bd9d7679011042f46104d97080786423ae58879
|
/1204/a/a.py
|
d8d5e6116795f57a81dca052975687f30ae9b0d3
|
[
"CC-BY-4.0"
] |
permissive
|
lucifer1004/codeforces
|
20b77bdd707a1e04bc5b1230f5feb4452d5f4c78
|
d1fe331d98d6d379723939db287a499dff24c519
|
refs/heads/master
| 2023-04-28T16:00:37.673566 | 2023-04-17T03:40:27 | 2023-04-17T03:40:27 | 212,258,015 | 3 | 1 | null | 2020-10-27T06:54:02 | 2019-10-02T04:53:36 |
C++
|
UTF-8
|
Python
| false | false | 81 |
py
|
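# Reads a binary number n, then counts how many integer divisions by 4 are
# needed to reduce n - 1 to zero (i.e. the number of base-4 digits of n - 1).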
t = int(input(), 2) - 1
ans = 0
while t > 0:
ans += 1
t //= 4
print(ans)
|
[
"[email protected]"
] | |
13a9675c88f2c319982ff7d6f346121a7255f7ed
|
80b7f2a10506f70477d8720e229d7530da2eff5d
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/groupbucketdescstatlearnedinformation_c5c1fdcf0cd8750ead47c9919177d367.py
|
6b312374bb89b93e31c3fff29a61dbf52b7037cc
|
[
"MIT"
] |
permissive
|
OpenIxia/ixnetwork_restpy
|
00fdc305901aa7e4b26e4000b133655e2d0e346a
|
c8ecc779421bffbc27c906c1ea51af3756d83398
|
refs/heads/master
| 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 |
MIT
| 2023-02-02T07:02:43 | 2019-03-06T15:27:20 |
Python
|
UTF-8
|
Python
| false | false | 8,164 |
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class GroupBucketDescStatLearnedInformation(Base):
"""NOT DEFINED
The GroupBucketDescStatLearnedInformation class encapsulates a list of groupBucketDescStatLearnedInformation resources that are managed by the system.
A list of resources can be retrieved from the server using the GroupBucketDescStatLearnedInformation.find() method.
"""
__slots__ = ()
_SDM_NAME = "groupBucketDescStatLearnedInformation"
_SDM_ATT_MAP = {
"ActionCount": "actionCount",
"DataPathId": "dataPathId",
"DataPathIdAsHex": "dataPathIdAsHex",
"GroupId": "groupId",
"LocalIp": "localIp",
"RemoteIp": "remoteIp",
"WatchGroup": "watchGroup",
"WatchPort": "watchPort",
"Weight": "weight",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(GroupBucketDescStatLearnedInformation, self).__init__(parent, list_op)
@property
def ActionCount(self):
# type: () -> int
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP["ActionCount"])
@property
def DataPathId(self):
# type: () -> str
"""
Returns
-------
- str: The Data Path ID of the OpenFlow switch.
"""
return self._get_attribute(self._SDM_ATT_MAP["DataPathId"])
@property
def DataPathIdAsHex(self):
# type: () -> str
"""
Returns
-------
- str: The Data Path ID of the OpenFlow switch in hexadecimal format.
"""
return self._get_attribute(self._SDM_ATT_MAP["DataPathIdAsHex"])
@property
def GroupId(self):
# type: () -> int
"""
Returns
-------
- number: A 32-bit integer uniquely identifying the group.
"""
return self._get_attribute(self._SDM_ATT_MAP["GroupId"])
@property
def LocalIp(self):
# type: () -> str
"""
Returns
-------
- str: The Data Path ID of the OpenFlow switch.
"""
return self._get_attribute(self._SDM_ATT_MAP["LocalIp"])
@property
def RemoteIp(self):
# type: () -> str
"""
Returns
-------
- str: The Remote IP address of the selected interface.
"""
return self._get_attribute(self._SDM_ATT_MAP["RemoteIp"])
@property
def WatchGroup(self):
# type: () -> int
"""
Returns
-------
- number: A group whose state determines whether this bucket is live or not. Default value OFPG_ANY(4,294,967,295) indicates that Watch Group is not specified in ofp_group_mod packets.
"""
return self._get_attribute(self._SDM_ATT_MAP["WatchGroup"])
@property
def WatchPort(self):
# type: () -> int
"""
Returns
-------
- number: A Port whose state determines whether this bucket is live or not. Default value OFPP_ANY(4,294,967,295) indicates that Watch Port is not specified in ofp_group_mod packets.
"""
return self._get_attribute(self._SDM_ATT_MAP["WatchPort"])
@property
def Weight(self):
# type: () -> int
"""
Returns
-------
- number: Specify the weight of buckets. The range allowed is 0-65535.
"""
return self._get_attribute(self._SDM_ATT_MAP["Weight"])
def add(self):
"""Adds a new groupBucketDescStatLearnedInformation resource on the json, only valid with batch add utility
Returns
-------
- self: This instance with all currently retrieved groupBucketDescStatLearnedInformation resources using find and the newly added groupBucketDescStatLearnedInformation resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(
self,
ActionCount=None,
DataPathId=None,
DataPathIdAsHex=None,
GroupId=None,
LocalIp=None,
RemoteIp=None,
WatchGroup=None,
WatchPort=None,
Weight=None,
):
# type: (int, str, str, int, str, str, int, int, int) -> GroupBucketDescStatLearnedInformation
"""Finds and retrieves groupBucketDescStatLearnedInformation resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve groupBucketDescStatLearnedInformation resources from the server.
        To retrieve an exact match, ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all groupBucketDescStatLearnedInformation resources from the server.
Args
----
- ActionCount (number): NOT DEFINED
- DataPathId (str): The Data Path ID of the OpenFlow switch.
- DataPathIdAsHex (str): The Data Path ID of the OpenFlow switch in hexadecimal format.
- GroupId (number): A 32-bit integer uniquely identifying the group.
- LocalIp (str): The Data Path ID of the OpenFlow switch.
- RemoteIp (str): The Remote IP address of the selected interface.
- WatchGroup (number): A group whose state determines whether this bucket is live or not. Default value OFPG_ANY(4,294,967,295) indicates that Watch Group is not specified in ofp_group_mod packets.
- WatchPort (number): A Port whose state determines whether this bucket is live or not. Default value OFPP_ANY(4,294,967,295) indicates that Watch Port is not specified in ofp_group_mod packets.
- Weight (number): Specify the weight of buckets. The range allowed is 0-65535.
Returns
-------
- self: This instance with matching groupBucketDescStatLearnedInformation resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of groupBucketDescStatLearnedInformation data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the groupBucketDescStatLearnedInformation resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
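
# --- Hypothetical usage sketch (not part of the generated module) ---
# Instances of this class are normally reached through a parent node of the
# ixnetwork_restpy session tree; the parent path below is illustrative only.
#
#     learned = parent.GroupBucketDescStatLearnedInformation.find()
#     for info in learned:
#         print(info.DataPathId, info.GroupId, info.Weight)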
|
[
"[email protected]"
] | |
b360b1b3eb8df1b4bdb56e27bd41976012314685
|
4dde2454c42df46eb519743ddb7a7db05697b4a6
|
/alexa/remindMeOfXWhenZone/remindMeOfXWhenZoneIntent.py
|
3f8dddf1c2722ae1034c0d7c95721f162f99f5ef
|
[
"MIT"
] |
permissive
|
pippyn/appdaemon-scripts
|
7e4231e9c28c8f906f97e8bb7d353d4297453426
|
615cdfeaaf039ffbe1be041eb07c35a2494f008d
|
refs/heads/master
| 2020-04-20T22:14:07.608237 | 2019-02-26T10:16:21 | 2019-02-26T10:16:21 | 169,133,019 | 0 | 0 |
MIT
| 2019-02-19T14:49:54 | 2019-02-04T19:11:20 |
Python
|
UTF-8
|
Python
| false | false | 2,881 |
py
|
import appdaemon.plugins.hass.hassapi as hass
import datetime
import globals
__ZONE_ACTION_ENTER__ = "kommen"
__ZONE_ACTION_LEAVE__ = "verlassen"
class RemindMeOfXWhenZoneIntent(hass.Hass):
def initialize(self):
self.timer_handle_list = []
self.listen_state_handle_list = []
self.device_tracker = globals.get_arg(self.args,"device_tracker")
self.notify_name = globals.get_arg(self.args,"notify_name")
self.remindMessageSkeleton = globals.get_arg(self.args,"remindMessageSkeleton")
self.notifier = self.get_app('Notifier')
return
def getIntentResponse(self, slots, devicename):
############################################
        # An intent to give back the state of a light,
        # but it can also be any other kind of entity.
############################################
try:
# get zone_name for friendly name used when talking to alexa
zone_name = None
for key, value in self.args["zoneMapping"].items():
if key == slots["zone"].lower():
zone_name = value
# listen to a state change of the zone
if zone_name == None:
raise Exception("Could not find zonemapping for: {}".format(slots["zone"].lower()))
else:
self.listen_state_handle_list.append(self.listen_state(self.remind_callback, self.device_tracker, zone=slots["zone"], zoneAction=slots["zoneAction"], reminder=slots["reminder"]))
# set correct zoneAction response
if slots["zoneAction"] == __ZONE_ACTION_ENTER__:
text = self.args["textLine"] + self.args["textEnter"]
else:
text = self.args["textLine"] + self.args["textLeave"]
except Exception as e:
self.log("Exception: {}".format(e))
self.log("slots: {}".format(slots))
text = self.random_arg(self.args["Error"])
return text
def remind_callback(self, entity, attribute, old, new, kwargs):
if kwargs["zoneAction"] == __ZONE_ACTION_ENTER__:
if new != old and new == kwargs["zone"]:
self.log("Notifying")
self.notifier.notify(self.notify_name, self.remindMessageSkeleton + kwargs["reminder"], useAlexa=False)
elif kwargs["zoneAction"] == __ZONE_ACTION_LEAVE__:
if new != old and old == kwargs["zone"]:
self.log("Notifying")
self.notifier.notify(self.notify_name, self.remindMessageSkeleton + kwargs["reminder"], useAlexa=False)
def terminate(self):
for timer_handle in self.timer_handle_list:
self.cancel_timer(timer_handle)
for listen_state_handle in self.listen_state_handle_list:
self.cancel_listen_state(listen_state_handle)
|
[
"[email protected]"
] | |
8c3da2bbe7f5f87820bcb9005c4e3ef16da39be2
|
13f78c34e80a52442d72e0aa609666163233e7e0
|
/Other/Kattis/Waterloo 2004-06-12/workreduction.py
|
c80361090eb8f0fba8bb708dfdf2b4d4c124c2a4
|
[] |
no_license
|
Giantpizzahead/comp-programming
|
0d16babe49064aee525d78a70641ca154927af20
|
232a19fdd06ecef7be845c92db38772240a33e41
|
refs/heads/master
| 2023-08-17T20:23:28.693280 | 2023-08-11T22:18:26 | 2023-08-11T22:18:26 | 252,904,746 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
# https://open.kattis.com/problems/reduction
import re
C = int(input())
for c in range(C):
print('Case', c+1)
N, M, L = map(int, input().split())
A = []
for i in range(L):
name, a, b = re.compile('(.*):(.*),(.*)').match(input()).groups()
a = int(a)
b = int(b)
# print(name, a, b)
best_c = a * (N-M)
curr_n = N
curr_c = 0
while curr_n // 2 >= M:
curr_n //= 2
curr_c += b
best_c = min(curr_c + a * (curr_n-M), best_c)
A.append({'name': name, 'cost': best_c})
A = sorted(A, key=lambda x: (x['cost'], x['name']))
for x in A:
print(x['name'], x['cost'])
|
[
"[email protected]"
] | |
1c94a0c295b12369a8682ebb5c180c3eae6f1936
|
7a09af404f29389504742a3d5f1727bfbe562750
|
/TrekBot2_WS/build/razor_imu_9dof/catkin_generated/generate_cached_setup.py
|
1190411b71f9bdcde84c39269e3159441d673eab
|
[
"MIT"
] |
permissive
|
Rafcin/TrekBot
|
4baa2ed93b90920b36adba0b72384ac320d2de01
|
d3dc63e6c16a040b16170f143556ef358018b7da
|
refs/heads/master
| 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,367 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/xavier_ssd/TrekBot/TrekBot2_WS/devel;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python3/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/razor_imu_9dof/env.sh')
output_filename = '/xavier_ssd/TrekBot/TrekBot2_WS/build/razor_imu_9dof/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"[email protected]"
] | |
d8dc6570935d4aa953a40d069eed60b9c1bd97c3
|
b174893c1b90296e0e4612c4b28901970f643bbb
|
/manage.py
|
54c3cc035f32d428c9a6383877a5189ae555dae8
|
[
"MIT"
] |
permissive
|
laetitia123/akikatest
|
63628f4ed9ad69c8660b37ac4bb86f9ff1a17360
|
812f26155b6e3d003ac7e48c08c16df406e11086
|
refs/heads/master
| 2023-01-28T17:04:03.650586 | 2020-12-10T08:37:57 | 2020-12-10T08:37:57 | 312,552,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 809 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "akikaproject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
e390b936805ed8e4841c5543cf4ce0d3c7ddee54
|
ef0e9cf79347ecde056d947fe157da95c39b1bac
|
/mundo_3/exercicios/ex115/115.py
|
83de4f5643ad7dca477b9d3aa4baee5f3c681b33
|
[] |
no_license
|
Iuri-Almeida/CeV-Python
|
0c78a1b365bdbd3345ea894ddd6c01b4c81761e7
|
cfc3ff1853fdc998a9ea2301d86165263d0f216d
|
refs/heads/master
| 2023-04-20T17:07:39.454125 | 2021-05-11T21:04:29 | 2021-05-11T21:04:29 | 329,114,163 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 271 |
py
|
from lib import menu, opc_0, opc_1, opc_2, opc_3
print(f'{" Exercício 115 ":=^31}')
# Main program
while True:
menu()
opc = opc_0()
if opc == '1':
opc_1()
elif opc == '2':
opc_2()
elif opc == '3':
opc_3()
break
|
[
"iurilopesalmeida.gmail.com"
] |
iurilopesalmeida.gmail.com
|
64166d1e37d47f49e54088c204e381b2bbb42098
|
845e3c428e18232777f17b701212dcbb1b72acc1
|
/lib/fast_rcnn/cg_train.py
|
8efe4947ae0b657239f2bfe57734ef2cf645a678
|
[
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
chuanxinlan/ohem-1
|
dd10b2f5ff15e81ab9e42e936bb44d98e01c6795
|
b7552ceb8ed1e9768e0d522258caa64b79834b54
|
refs/heads/master
| 2021-09-16T18:31:25.651432 | 2018-06-23T10:09:24 | 2018-06-23T10:09:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,760 |
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import google.protobuf.text_format
import matplotlib.pyplot as plt
import caffe
from fast_rcnn.config import cfg
import roi_data_layer.roidb as rdl_roidb
from utils.timer import Timer
import numpy as np
import os, cv2
import shutil
from caffe.proto import caffe_pb2
import google.protobuf as pb2
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
    This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, output_dir,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = \
rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_roidb(roidb)
def snapshot(self):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
self.solver.snapshot()
caffemodel = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
caffemodelFull = os.path.join(self.output_dir, caffemodel)
shutil.copyfile(caffemodel, caffemodelFull)
os.remove(caffemodel)
solverstate = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.solverstate')
solverstateFull = os.path.join(self.output_dir, solverstate)
shutil.copyfile(solverstate, solverstateFull)
os.remove(solverstate)
return caffemodelFull
def vis_detections(self, im, dets, pred_kp, labels = None):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
print('dets.shape', dets.shape)
for i in xrange(len(dets)):
if labels is None or labels[i] == 1.:
fig, ax = plt.subplots(figsize=(12, 12))
fig = ax.imshow(im, aspect='equal')
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
print('dets.shape', dets.shape)
bbox = dets[i]
kp = pred_kp[i]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=1.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:d}, {:d}'.format(int(bbox[2] - bbox[0]), int(bbox[3] - bbox[1])),
bbox=dict(facecolor='blue', alpha=0.2),
fontsize=8, color='white')
for j in range(14):
x, y = kp[j * 2 : (j + 1) * 2]
r = (j % 3) * 0.333
g = ((j / 3) % 3) * 0.333
b = (j / 3 / 3) * 0.333
ax.add_patch(
plt.Circle((x, y), 10,
fill=True,
color=(r, g, b),
edgecolor = (r, g, b),
linewidth=2.0)
)
plt.show('x')
def gao(self):
net = self.solver.net
print(net.params['conv1_1/conv'][0].data[0,0])
exit(0)
from fast_rcnn.bbox_transform_kp import clip_boxes, bbox_transform_inv, kp_transform_inv, clip_kps
im = net.blobs['data'].data.copy()
im = im[0, :, :, :]
im = im.transpose(1, 2, 0)
im += cfg.PIXEL_MEANS
im = im.astype(np.uint8, copy=False)
rois = net.blobs['rois'].data.copy()
boxes = rois[:, 1:5]
# bbox_targets = net.blobs['head_targets_hard_repool'].data.copy()
labels = net.blobs['labels'].data.copy()
bbox_gt = net.blobs['bbox_targets'].data.copy()
bbox_targets = net.blobs['bbox_pred'].data.copy()
bbox_targets[:, 4:] *= np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS)
bbox_targets[:, 4:] += np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
pred_boxes = bbox_transform_inv(boxes, bbox_targets)
pred_boxes = clip_boxes(pred_boxes, im.shape)
cls_boxes = pred_boxes[:, 4:]
kp_gt = net.blobs['kp_targets'].data.copy()
kp_targets = net.blobs['kp_pred'].data.copy()
kp_targets[:, :] *= np.array(cfg.TRAIN.KP_NORMALIZE_STDS)
kp_targets[:, :] += np.array(cfg.TRAIN.KP_NORMALIZE_MEANS)
pred_kp = kp_transform_inv(boxes, kp_targets)
pred_kp = clip_kps(pred_kp, im.shape)
print(boxes.shape)
print(kp_targets.shape)
print(pred_kp.shape)
print(cls_boxes.shape)
print(labels[0])
print(bbox_targets[0])
print(bbox_gt[0])
print(kp_targets[0])
print(kp_gt[0])
print(net.blobs['kp_inside_weights'].data.copy()[0])
# pred_kp = clip_boxes(pred_boxes, im.shape)
self.vis_detections(im, cls_boxes, pred_kp, labels)
exit(0)
def gao_fcn_reg(self, iter_num):
net = self.solver.net
im = net.blobs['data'].data.copy()
im = im[0, :, :, :]
im = im.transpose(1, 2, 0)
im += cfg.PIXEL_MEANS
im = im.astype(np.uint8, copy=False)
reg_targets = net.blobs['reg_targets'].data.copy()
rpn_cls_reg = net.blobs['upsample/rpn_cls_reg'].data.copy()
reg_targets = np.abs(reg_targets * 255)
rpn_cls_reg = np.abs(rpn_cls_reg * 255)
cv2.imwrite(str(iter_num) + 'reg_targets.png', reg_targets[0,0])
cv2.imwrite(str(iter_num) + 'rpn_reg.png' , rpn_cls_reg[0,0])
def showImage(self, im, labels, rois, kpFcnLabel, kpFcnPred, imageId):
classToColor = ['', 'red', 'yellow', 'blue', 'magenta']
im = im[:, :, (2, 1, 0)]
thresh = 0.5
line = [[13, 14], [14, 4], [4, 5], [5, 6], [14, 1], [1, 2], [2, 3], \
[14, 10], [10, 11], [11, 12], [14, 7], [7, 8], [8, 9]]
c = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
fig, ax = plt.subplots(figsize=(12, 12))
fig = ax.imshow(im, aspect='equal')
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
for i, box in enumerate(rois):
if labels[i] != 0:
ax.add_patch(
plt.Rectangle((box[0], box[1]),
box[2] - box[0],
box[3] - box[1], fill=False,
edgecolor= 'r', linewidth=2.0)
)
kpLabel = kpFcnLabel[i, 0]
kpPred = kpFcnPred[i, 1]
print(np.min(kpLabel), np.max(kpLabel),
np.min(kpPred), np.max(kpPred))
kpPred /= np.max(kpPred)
cv2.imwrite('{}_kpFcnLabel.png'.format(i), kpLabel * 255)
cv2.imwrite('{}_kpFcnPred.png'.format(i), kpPred * 255)
'''
for j in range(14):
x, y, p = kp[j * 3 : (j + 1) * 3]
ax.add_patch(
plt.Circle((x, y), 3,
fill=True,
color = c[1],
linewidth=2.0)
)
ax.text(x, y - 2, '{:.3f}'.format(kp_scores[i, j]),
bbox=dict(facecolor='blue', alpha=0.2),
fontsize=8, color='white')
for l in line:
i0 = l[0] - 1
p0 = kp[i0 * 3 : (i0 + 1) * 3]
i1 = l[1] - 1
p1 = kp[i1 * 3 : (i1 + 1) * 3]
ax.add_patch(
plt.Arrow(p0[0], p0[1], p1[0] - p0[0], p1[1] - p0[1],
color = c[2])
)
'''
plt.savefig(str(imageId) , bbox_inches='tight', pad_inches=0)
exit(0)
def gao_cluster_fcn(self, iter_num):
net = self.solver.net
im = net.blobs['data'].data.copy()
im = im[0, :, :, :]
im = im.transpose(1, 2, 0)
im += cfg.PIXEL_MEANS
im = im.astype(np.uint8, copy=False)
rois = net.blobs['rois_repool'].data.copy()
boxes = rois[:, 1:5]
scores = net.blobs['labels'].data.copy()
kpLabel = net.blobs['kp_targets'].data.copy().reshape(-1, 14, 192, 192)
kpFcn = net.blobs['pred_fcn_reshape'].data.copy().reshape(-1, 28, 192, 192)
self.showImage(im, scores, boxes, kpLabel, kpFcn, iter_num)
exit(0)
def train_model(self, max_iters):
"""Network training loop."""
last_snapshot_iter = -1
timer = Timer()
model_paths = []
while self.solver.iter < max_iters:
# Make one SGD update
# self.gao()
timer.tic()
self.solver.step(1)
timer.toc()
# self.gao_cluster_fcn(self.solver.iter)
if self.solver.iter % (10 * self.solver_param.display) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = self.solver.iter
model_paths.append(self.snapshot())
if last_snapshot_iter != self.solver.iter:
model_paths.append(self.snapshot())
return model_paths
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
print 'Preparing training data...'
rdl_roidb.prepare_roidb(imdb)
print 'done'
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after)
return filtered_roidb
def train_net(solver_prototxt, roidb, output_dir,
pretrained_model=None, max_iters=40000):
"""Train a Fast R-CNN network."""
roidb = filter_roidb(roidb)
sw = SolverWrapper(solver_prototxt, roidb, output_dir,
pretrained_model=pretrained_model)
print 'Solving...'
model_paths = sw.train_model(max_iters)
print 'done solving'
return model_paths
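
# --- Hypothetical invocation sketch (not part of the original module) ---
# train_net() is normally driven by a tools/train_net.py-style script; the
# paths below are placeholders.
#
#     roidb = get_training_roidb(imdb)
#     train_net('models/solver.prototxt', roidb, output_dir='output',
#               pretrained_model='data/imagenet_models/VGG16.caffemodel',
#               max_iters=40000)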
|
[
"[email protected]"
] | |
aeeff36ea48d96733e9ec151a94c9382d4596cc6
|
f72fe33d1a181f89d2464cc07744dbd275a7d071
|
/CNNectome/networks/custom_ops.py
|
989abdd29eae6ad7b3e40f2b0ca1ec7239ace8f0
|
[
"BSD-2-Clause"
] |
permissive
|
saalfeldlab/CNNectome
|
6c8d44d8cc2e161a91b10abb7b4a425d7fc64d1b
|
c043e3111ff5ec6707a68edffae54eb902a1652d
|
refs/heads/master
| 2023-04-03T15:11:36.586030 | 2022-06-15T14:12:17 | 2022-06-15T14:12:17 | 124,144,317 | 8 | 10 |
BSD-2-Clause
| 2023-03-24T22:16:04 | 2018-03-06T22:04:16 |
Python
|
UTF-8
|
Python
| false | false | 1,672 |
py
|
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
import tensorflow as tf
def ignore(x, binary_tensor, name=None):
with ops.name_scope(name, "ignore", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
keep_ratio = math_ops.divide(
math_ops.reduce_sum(binary_tensor),
math_ops.reduce_prod(
array_ops.shape(binary_tensor, out_type=dtypes.float32)
),
)
keep_ratio.get_shape().assert_is_compatible_with(tensor_shape.scalar())
with tf.Session() as sess:
print(keep_ratio.eval(session=sess))
ret = math_ops.div(x, keep_ratio) * binary_tensor
ret.set_shape(x.get_shape())
return ret
def tf_var_summary(var):
# compute mean of variable
mean = tf.reduce_mean(var)
tf.summary.scalar("mean_" + var.name, mean)
# compute std of variable
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar("stddev_" + var.name, stddev)
tf.summary.scalar("max_" + var.name, tf.reduce_max(var))
tf.summary.scalar("min_" + var.name, tf.reduce_min(var))
tf.summary.histogram("histogram_" + var.name, var)
|
[
"[email protected]"
] | |
73551d725e67569eda29757bfcb488d0f9880e01
|
a74418a8adacfdf67ecf172310d3c13905466f08
|
/pg/tests/test_pg.py
|
087ad38046799ae1b1b18a481cd1e28922484e7a
|
[
"BSD-3-Clause"
] |
permissive
|
eduardolujan/sqlalchemist
|
ac98cf67c78d367df2f3627f068500b4bc908431
|
7d0d04051ad11fe046fbb6295d9ecec72912d3a4
|
refs/heads/master
| 2022-07-19T13:53:45.326090 | 2020-05-22T11:12:24 | 2020-05-22T11:12:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
import pytest
import psycopg2
from ..config import get_config
from ..models import Session
@pytest.fixture
def config():
return get_config()
def test_configuration(config):
assert "database" in config
assert "conn_str" in config.database
def test_connection(config):
    psycopg2.connect(config.database.conn_str)
|
[
"[email protected]"
] | |
96ef7908b5993d5104612be601d4c170f6ea3135
|
a234ecbf8a71075ba90c84f19443d2235c0f4234
|
/test_numpy/test_sklearn/lightgbm/lightgbm_cv.py
|
4b9befec8d5a724d9b9d2a1dffd05f1b79465ee7
|
[] |
no_license
|
hkxIron/hkx_tf_practice
|
b9b58f7c52b07d4f10709804efc964cf17e5e3ff
|
76d0c12750d66b17e71a7102263e1d1fc637611a
|
refs/heads/master
| 2023-08-04T16:51:30.852650 | 2023-08-04T13:05:35 | 2023-08-04T13:05:35 | 118,223,247 | 10 | 13 | null | 2022-12-07T23:32:58 | 2018-01-20T08:14:06 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,940 |
py
|
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMClassifier
import os
import socket
class LGBMClassifierCV(object):
"""cross_val_predict"""
def __init__(self, params=None, cv=5, random_state=None, n_repeats=None):
self.clf = LGBMClassifier()
if params:
self.clf.set_params(**params)
if n_repeats:
            self._kf = RepeatedStratifiedKFold(cv, n_repeats, random_state)  # repeat the stratified split n_repeats times
self._num_preds = cv * n_repeats
else:
self._kf = StratifiedKFold(cv, True, random_state)
self._num_preds = cv
def fit(self, X, y, X_test=None,
feval=roc_auc_score,
sample_weight=None,
init_score=None,
eval_metric='auc',
early_stopping_rounds=100,
verbose=100,
feature_name='auto',
categorical_feature='auto',
callbacks=None):
"""输入数组"""
if X_test is None:
            X_test = X[:1]  # use the first row as a placeholder test set
self.oof_train = np.zeros(len(X))
        self.oof_test = np.zeros((len(X_test), self._num_preds))  # _num_preds: total number of folds (cv * repeats)
for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
if verbose:
print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
X_train, y_train = X[train_index], y[train_index]
X_valid, y_valid = X[valid_index], y[valid_index]
            eval_set = [(X_train, y_train), (X_valid, y_valid)]  # evaluate on both the train and validation sets
########################################################################
self.clf.fit(X_train,
y_train,
sample_weight,
init_score,
eval_set,
eval_names=('Train', 'Valid'),
eval_sample_weight=None,
eval_class_weight=None,
eval_init_score=None,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]
self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]
########################################################################
        # out-of-fold predictions for the test set
self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(axis=1)/len(self.oof_test)).values
        self.oof_test = self.oof_test.mean(axis=1)  # average the test-set OOF predictions across folds
assert len(X) == len(self.oof_train)
assert len(X_test) == len(self.oof_test)
        # compute the out-of-fold score on the training set
if feval:
self.oof_train_score = feval(y, self.oof_train)
print(f"\n\033[94mtrain CV Score: {self.oof_train_score} ended at {time.ctime()}\033[0m")
return self.oof_train_score
def oof_submit(self, ids, pred_ranking=False, file=None, preds=None):
"""preds分用于submit"""
if file is None:
file = f'submit_{self.oof_train_score}.csv'
print(f'Save {file} ...')
if preds is None:
            preds = self.oof_test_rank if pred_ranking else self.oof_test  # rank-transform only when requested
if not isinstance(ids, pd.DataFrame):
ids = pd.DataFrame(ids)
ids.assign(preds=preds).to_csv(file, index=False, header=False)
@property
def oof_train_and_test(self):
return np.r_[self.oof_train, self.oof_test]
def oof_save(self, file='./oof_train_and_test.csv'):
pd.DataFrame(self.oof_train_and_test, columns=['oof_train_and_test']).to_csv(file, index=False)
def plot_feature_importances(self, feature_names=None, topk=20, figsize=(10, 6), pic_name=None):
columns = ['Importances', 'Features']
importances = self.clf.feature_importances_.tolist()
if feature_names is None:
feature_names = list(map(lambda x: f'F_{x}', range(len(importances))))
_ = list(zip(importances, feature_names))
df = pd.DataFrame(_, columns=columns).sort_values('Importances', 0, False)
plt.figure(figsize=figsize)
sns.barplot(*columns, data=df[:topk])
plt.title('Features Importances\n')
plt.tight_layout()
if pic_name is None:
plt.savefig(f'importances_{self.oof_train_score}.png')
if __name__ == "__main__":
from sklearn.datasets import make_classification
X, y = make_classification()
X_test, _ = make_classification()
clf = LGBMClassifierCV()
clf.fit(X, y, X_test)
clf.plot_feature_importances()
"""
    An example set of LGBMClassifierCV parameters:
params = {
'class_weight':'balanced',
'metric': 'auc',
'boosting_type': 'gbdt',
'objective': 'binary',
'max_depth': -1,
'num_leaves': 16,
'learning_rate': 0.005,
'min_split_gain': 0.884,
'min_child_weight': 0.01,
'min_child_samples': 31,
'subsample': 0.788,
'subsample_freq': 8,
'colsample_bytree': 0.617,
'reg_alpha': 0.631,
'reg_lambda': 0.81,
'scale_pos_weight': 1,
'random_state': 666,
'verbosity': -1,
'n_jobs': -1,
    'n_estimators': 30000}  # a value around 300 also seemed to score quite high
oof8 = LGBMClassifierCV(params, 8, 999)
oof8.fit(X, y, X_test, early_stopping_rounds=300)
"""
|
[
"[email protected]"
] | |
fa4a6b6fafa904dd6b8f73b059e98f06cc017a3d
|
cc2029f40a12e82712072275fc76a07ac59b5940
|
/levelup/practice/python/introduction/05_python_division.py
|
2a18c52f2b3f31ea8c46490d14b69dcfef6dc1e2
|
[
"MIT"
] |
permissive
|
heitorchang/learn-code
|
d3fb8e45d539d302372126fe28e85032590b5707
|
5e6e56f7257de1910830619c01d470e892d7f9d8
|
refs/heads/master
| 2023-08-09T13:46:18.623772 | 2023-07-21T16:57:11 | 2023-07-21T16:57:11 | 147,522,837 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 144 |
py
|
def solution(a, b):
print(a // b)
print(a / b)
if __name__ == "__main__":
a = int(input())
b = int(input())
solution(a, b)
|
[
"[email protected]"
] | |
56bfe735a21870f3ff4b0bfa624cdd7cda45126a
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/contrib/grid_rnn/python/ops/grid_rnn_cell.pyi
|
67a5259cffa6ae62b64e32131f4ff72733496071
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,905 |
pyi
|
# Stubs for tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from collections import namedtuple as namedtuple
from tensorflow.contrib import layers as layers, rnn as rnn
from tensorflow.python.ops import array_ops as array_ops, math_ops as math_ops, nn as nn
from typing import Any as Any, Optional as Optional
class GridRNNCell(rnn.RNNCell):
def __init__(self, num_units: Any, num_dims: int = ..., input_dims: Optional[Any] = ..., output_dims: Optional[Any] = ..., priority_dims: Optional[Any] = ..., non_recurrent_dims: Optional[Any] = ..., tied: bool = ..., cell_fn: Optional[Any] = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
@property
def output_size(self): ...
@property
def state_size(self): ...
def __call__(self, inputs: Any, state: Any, scope: Optional[Any] = ...): ...
class Grid1BasicRNNCell(GridRNNCell):
def __init__(self, num_units: Any, state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2BasicRNNCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid1BasicLSTMCell(GridRNNCell):
def __init__(self, num_units: Any, forget_bias: int = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2BasicLSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., forget_bias: int = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid1LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid3LSTMCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., use_peepholes: bool = ..., forget_bias: float = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
class Grid2GRUCell(GridRNNCell):
def __init__(self, num_units: Any, tied: bool = ..., non_recurrent_fn: Optional[Any] = ..., state_is_tuple: bool = ..., output_is_tuple: bool = ...) -> None: ...
_GridRNNDimension = namedtuple('_GridRNNDimension', ['idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn'])
_GridRNNConfig = namedtuple('_GridRNNConfig', ['num_dims', 'dims', 'inputs', 'outputs', 'recurrents', 'priority', 'non_priority', 'tied', 'num_units'])
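
# --- Hypothetical usage sketch (not part of the stub file) ---
# Per the signatures above, a 2D grid LSTM cell could be constructed as:
#
#     cell = grid_rnn_cell.Grid2LSTMCell(num_units=128)
#     outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)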
|
[
"[email protected]"
] | |
8ce9a4c443487d77f258782e07d82452b044ae5a
|
3fc029440f5d6c2ae2e9f2402ce8d92906d4a789
|
/betomax_shop/celery.py
|
ea647618bddb84d4ab230bdc41232a6285a011e6
|
[] |
no_license
|
NickVazovsky/betomax-app
|
8d832986202165483d222de9fe53f88bc4476c8e
|
4a99a990f5e162948592e18bb595aa6b05375c80
|
refs/heads/master
| 2020-05-22T08:22:25.083415 | 2019-05-12T16:02:25 | 2019-05-12T16:02:25 | 186,273,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'betomax_shop.settings')
app = Celery('betomax_shop')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
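
# Hypothetical example (not part of the original file): with the
# autodiscovery above, a task would normally live in a tasks.py module of
# one of the INSTALLED_APPS, e.g.:
#
#     @app.task
#     def add(x, y):
#         return x + y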
|
[
"[email protected]"
] | |
c42df29bc0afa54406c074c4736e6481149e8f18
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/19496b5b7395085ac068dfd071ec6773b471d1fa-<main>-fix.py
|
ba56216710ec092e27b73c3fe73ccd79f93bfc99
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,521 |
py
|
def main():
module = AnsibleModule(argument_spec=dict(name=dict(required=True, type='str', aliases=['unit', 'service']), state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'), enabled=dict(type='bool'), masked=dict(type='bool'), daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']), user=dict(type='bool', default=False), no_block=dict(type='bool', default=False)), supports_check_mode=True, required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']])
systemctl = module.get_bin_path('systemctl')
if module.params['user']:
systemctl = (systemctl + ' --user')
if module.params['no_block']:
systemctl = (systemctl + ' --no-block')
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {
},
'warnings': [],
}
if module.params['daemon_reload']:
(rc, out, err) = module.run_command(('%s daemon-reload' % systemctl))
if (rc != 0):
module.fail_json(msg=('failure %d during daemon-reload: %s' % (rc, err)))
found = False
is_initd = sysv_exists(unit)
is_systemd = False
(rc, out, err) = module.run_command(("%s show '%s'" % (systemctl, unit)))
if (rc == 0):
multival = []
if out:
k = None
for line in to_native(out).split('\n'):
if line.strip():
if (k is None):
if ('=' in line):
(k, v) = line.split('=', 1)
if v.lstrip().startswith('{'):
if (not v.rstrip().endswith('}')):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
elif line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
is_systemd = (('LoadState' in result['status']) and (result['status']['LoadState'] != 'not-found'))
if (is_systemd and ('LoadError' in result['status'])):
module.fail_json(msg=("Error loading unit file '%s': %s" % (unit, result['status']['LoadError'])))
found = (is_systemd or is_initd)
if (is_initd and (not is_systemd)):
result['warnings'].append(('The service (%s) is actually an init script but the system is managed by systemd' % unit))
if (module.params['masked'] is not None):
masked = (('LoadState' in result['status']) and (result['status']['LoadState'] == 'masked'))
if (masked != module.params['masked']):
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
fail_if_missing(module, found, unit, msg='host')
if (module.params['enabled'] is not None):
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
enabled = False
(rc, out, err) = module.run_command(("%s is-enabled '%s'" % (systemctl, unit)))
if (rc == 0):
enabled = True
elif (rc == 1):
if ((not module.params['user']) and is_initd and ((not out.strip().endswith('disabled')) or sysv_is_enabled(unit))):
enabled = True
result['enabled'] = enabled
if (enabled != module.params['enabled']):
result['changed'] = True
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
module.fail_json(msg=('Unable to %s service %s: %s' % (action, unit, (out + err))))
result['enabled'] = (not enabled)
if (module.params['state'] is not None):
fail_if_missing(module, found, unit, msg='host')
result['state'] = module.params['state']
if ('ActiveState' in result['status']):
action = None
if (module.params['state'] == 'started'):
if (not is_running_service(result['status'])):
action = 'start'
elif (module.params['state'] == 'stopped'):
if is_running_service(result['status']):
action = 'stop'
else:
if (not is_running_service(result['status'])):
action = 'start'
else:
action = module.params['state'][:(- 2)]
result['state'] = 'started'
if action:
result['changed'] = True
if (not module.check_mode):
(rc, out, err) = module.run_command(("%s %s '%s'" % (systemctl, action, unit)))
if (rc != 0):
module.fail_json(msg=('Unable to %s service %s: %s' % (action, unit, err)))
else:
module.fail_json(msg='Service is in unknown state', status=result['status'])
module.exit_json(**result)
|
[
"[email protected]"
] | |
35b7ce4b755869a2e037875ab1b19018617ee3aa
|
fdf1e1f4efc51bc024311d44a2fa4524f9b88bce
|
/girleffect/solutions/migrations/0035_auto_20171121_1708.py
|
0b2c8358c706a6809388d3d9448629bd84fda80b
|
[] |
no_license
|
girleffect/girleffect.org
|
8327ffd6bbd1103033c92fbd4cbe5461aa1c7f03
|
55731b1c024f207211a161fd6d3ca796beea7a61
|
refs/heads/master
| 2023-04-07T21:40:43.910892 | 2022-06-14T11:50:21 | 2022-06-14T11:50:21 | 112,452,828 | 1 | 2 | null | 2023-04-01T12:05:55 | 2017-11-29T09:13:18 |
Python
|
UTF-8
|
Python
| false | false | 7,882 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-21 17:08
from __future__ import unicode_literals
from django.db import migrations
import girleffect.utils.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailsnippets.blocks
class Migration(migrations.Migration):
dependencies = [
('solutions', '0034_auto_20171121_1533'),
]
operations = [
migrations.AlterField(
model_name='solutionpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('body_text', wagtail.wagtailcore.blocks.RichTextBlock(features=['h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('large_text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', label='Large Text', max_length=350, required=False)), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('quote', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=True)), ('citation', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('link_block', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))), icon='openquote', template='blocks/quote_block.html')), ('video', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(max_length=30, required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=False)), ('youtube_embed', wagtail.wagtailembeds.blocks.EmbedBlock(help_text="Your YouTube URL goes here. Only YouTube video URLs will be accepted. 
The custom 'play' button will be created for valid YouTube URLs.", label='YouTube Video URL')), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))), label='Girl Effect YouTube Video')), ('carousel', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('label', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item small label, for example Our Reach', max_length=30)), ('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item large title', max_length=30)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], help_text='Carousel item text', max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))), icon='image', template='blocks/carousel_block.html')), ('media_text_overlay', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Appears above the module.', label='Title Text', max_length=25, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('logo', wagtail.wagtailimages.blocks.ImageChooserBlock(label='Title Logo', required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))), label='Full Width Media with Text Overlay')), ('list_block', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', max_length=250, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', 
max_length=255, required=False))), required=False)))), icon='list-ul', template='blocks/list_column_block.html')), ('link_row', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False)))), icon='link', template='blocks/inline_link_block.html')), ('statistic', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('statistics', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.Statistic))), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))), label='Statistic Block')), ('call_to_action', wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.CallToActionSnippet, template='blocks/call_to_action.html')))),
),
]
|
[
"[email protected]"
] | |
e882ef3aee4b0683d0a470824ba429a2bde7b12e
|
39206c42c70818066839a6a6edbd63057a9636cf
|
/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple_test.py
|
59c22ea10e876ac11d2589725371affb12020288
|
[
"Apache-2.0"
] |
permissive
|
azrin-dev/tfx
|
8e55d6ddad5490f39eacbbef85eb56ea71c78954
|
88d999b1ab767940aef96805e29bc7784652e8f0
|
refs/heads/master
| 2020-08-30T09:30:52.528197 | 2019-10-29T15:46:51 | 2019-10-29T15:47:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,960 |
py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.examples.chicago_taxi_pipeline.taxi_pipeline_simple."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from airflow import models
import tensorflow as tf
from tfx.examples.chicago_taxi_pipeline import taxi_pipeline_simple
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
class TaxiPipelineSimpleTest(tf.test.TestCase):
def setUp(self):
super(TaxiPipelineSimpleTest, self).setUp()
self._test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
def testTaxiPipelineCheckDagConstruction(self):
airflow_config = {
'schedule_interval': None,
'start_date': datetime.datetime(2019, 1, 1),
}
logical_pipeline = taxi_pipeline_simple._create_pipeline(
pipeline_name='Test',
pipeline_root=self._test_dir,
data_root=self._test_dir,
module_file=self._test_dir,
serving_model_dir=self._test_dir,
metadata_path=self._test_dir,
direct_num_workers=1)
self.assertEqual(9, len(logical_pipeline.components))
pipeline = AirflowDagRunner(airflow_config).run(logical_pipeline)
self.assertIsInstance(pipeline, models.DAG)
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
5a6fda791aaf455984c75d0b4a48e62137ce6558
|
d8fe3b5243bec2b61fd7907c4ff799b24bb617e5
|
/Bloomberg_codecon/2015_Finals/conference_room_scheduler.py
|
4bb1db33982bbc78e65bad6fc5afe88eb073136e
|
[
"Unlicense"
] |
permissive
|
SelvorWhim/competitive
|
b89ed252512d88d9346d168dc6b48e0a42a6142d
|
1c73a5c7b2d0dc1b6c4f3f06ace69cdf5c6a34c0
|
refs/heads/master
| 2023-04-13T01:02:52.083519 | 2023-04-11T10:14:38 | 2023-04-11T10:14:38 | 96,573,533 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,358 |
py
|
### INSTRUCTIONS ###
'''
Bloomberg needs a new system to schedule conference rooms! To keep things simple, the system operates in 15 minutes blocks, and only during an 8 hour workday, so that there are 32 available time slots per day for each room.
Users will submit one of two different commands: Create Booking, and Query Available.
When a user attempts to create a booking, they will submit a Room Id, a starting Timeslot, and a length (in timeslots). Each conference room can only be occupied by one user, and is booked in increments of timeslots (a minimum booking is length 1, maximum is length 32). Any user can book any room for as many slots as possible, so long as their booking does not interfere with an already occupied room. If the booking overlaps with any other bookings for that conference room (even if it's only for one slot of many), the entire booking command is rejected (i.e., the room schedule remains unchanged).
A user can also query availability to ask which rooms are available during a certain time block (a starting timeslot + a length, in timeslots). The system should report to the user which rooms are available for the entire length of their requested time. If a room is unavailable for any amount of time during the requested window, it is not returned from the Query.
> Input Specifications
Input will be the number of rooms N on the first line (1<=N<=100), followed by any number of the following request types:
Booking: RoomId-TimeSlot-#OfSlots
Query: TimeSlot-#OfSlots
You can assume that no more than 100 requests will be made. Also, RoomIds and TimeSlots are indexed starting at 1, not 0.
> Output Specifications
Output as many lines as necessary to answer each request in the order they were received.
Booking: You will output Y if the booking is possible and N otherwise.
Query: You will output a list of space-delimited room ids in order. There should be no trailing or preceding spaces. If there are no rooms available that match the Query, print None
'''
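# A worked example (hypothetical; not part of the original problem statement):
#   2          (number of rooms)
#   1-1-4   -> Y  (room 1 booked for slots 1-4)
#   1-3-2   -> N  (slots 3-4 of room 1 are already taken)
#   1-5     -> 2  (only room 2 is free for slots 1-5)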
### MY SOLUTION (accepted) ###
#Problem : Finals Spring 2015 - Conference Room Scheduler
#Language : Python 3
#Compiled Using : py_compile
#Version : Python 3.4.3
#Input for your program will be provided from STDIN
#Print out all output from your program to STDOUT
import sys
# room and slot numbers indexed from 1
def areSlotsEmpty(mat,room,slot1,slots):
if slot1+slots-1 > 32:
return False
return sum(mat[room-1][slot1-1:slots+slot1-1])==0 # all slots have 0 or 1, if sum is 0 all are 0
data = sys.stdin.read().splitlines()
N=int(data[0])
queries = [[int(n) for n in line.split('-')] for line in data[1:]]
slot_mat = [[0 for x in range(32)] for y in range(N)] # table of rooms and slots. Will be 1 for taken - no identity need be saved
for q in queries:
if len(q) == 3: # Booking
if(areSlotsEmpty(slot_mat,q[0],q[1],q[2])):
for i in range(q[1]-1,q[2]+q[1]-1):
slot_mat[q[0]-1][i] = 1
print('Y')
else:
print('N')
else: # assumed len(q)==2 -> Query
free_rooms=''
for room in range(1,N+1):
if(areSlotsEmpty(slot_mat,room,q[0],q[1])):
free_rooms += str(room) + ' '
if free_rooms=='':
print('None')
else:
print(free_rooms.rstrip())
|
[
"[email protected]"
] | |
0b0a70e9f2d420767ef23ff86c91c8e79597d405
|
3ec9d3aa7e59475683dba30a87ca68242a7ec181
|
/cn/study/days100/days09/10扑克游戏.py
|
04ac207e67cdf30f300135ee8f51d0247c5e8d5b
|
[
"Apache-2.0"
] |
permissive
|
Jasonandy/Python-X
|
58bf36499572cdfb7d7bf80c6a3cd0c818f62c1e
|
2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe
|
refs/heads/master
| 2021-06-16T17:07:29.277404 | 2021-03-07T14:17:05 | 2021-03-07T14:17:05 | 175,353,402 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,739 |
py
|
"""
Poker game
"""
import random
class Card(object):
"""一张牌"""
def __init__(self, suite, face):
self._suite = suite
self._face = face
@property
def face(self):
return self._face
@property
def suite(self):
return self._suite
    # similar to overriding __str__?
def __str__(self):
if self._face == 1:
face_str = 'A'
elif self._face == 11:
face_str = 'J'
elif self._face == 12:
face_str = 'Q'
elif self._face == 13:
face_str = 'K'
else:
face_str = str(self._face)
        # string substitution: 1 -> A, 11 -> J, 12 -> Q, 13 -> K
return '%s%s' % (self._suite, face_str)
def __repr__(self):
return self.__str__()
class Poker(object):
"""一副牌"""
def __init__(self):
        # the deck; shuffled randomly later via shuffle()
self._cards = [Card(suite, face)
                       # four suits + face values 1 to 13 (A-K)
for suite in '♠♥♣♦'
for face in range(1, 14)]
self._current = 0
@property
def cards(self):
return self._cards
def shuffle(self):
"""洗牌(随机乱序)"""
self._current = 0
# shuffle 随机洗牌 方法将序列的所有元素随机排序
random.shuffle(self._cards)
@property
def next(self):
"""发牌"""
# 从list里面取出数据
card = self._cards[self._current]
self._current += 1
return card
@property
def has_next(self):
"""还有没有牌 判断下面的游标 指向还有无数据 """
return self._current < len(self._cards)
class Player(object):
"""玩家"""
def __init__(self, name):
self._name = name
self._cards_on_hand = []
@property
def name(self):
return self._name
@property
def cards_on_hand(self):
        # the cards currently in hand
return self._cards_on_hand
def get(self, card):
"""摸牌"""
self._cards_on_hand.append(card)
def arrange(self, card_key):
"""玩家整理手上的牌"""
self._cards_on_hand.sort(key=card_key)
# sort rule: first by suit, then by face value
def get_key(card):
return (card.suite, card.face)
def main():
p = Poker()
p.shuffle()
players = [Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐')]
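    # deal 13 cards to each of the four players, one card per player per round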
for _ in range(13):
for player in players:
player.get(p.next)
for player in players:
print(player.name + ':', end=' ')
player.arrange(get_key)
print(player.cards_on_hand)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
2ddfd5c25378e7cf46008dcc0ac1fd74913231a8
|
3f9f7fe32c655e612f351302ad1945e92e514a31
|
/ut/ml/diag/multiple_two_component_manifold_learning.py
|
2f4cbcb18311d9b8895f4644319128008c5f0fc5
|
[
"MIT"
] |
permissive
|
thorwhalen/ut
|
12ea7e0fd9bc452d71b0cc3d8ecdb527335a3c17
|
72dbdf41b0250708ad525030128cc7c3948b3f41
|
refs/heads/master
| 2023-02-17T06:44:11.053826 | 2023-02-07T13:22:07 | 2023-02-07T13:22:07 | 32,152,452 | 6 | 4 |
MIT
| 2023-02-16T00:34:33 | 2015-03-13T11:32:31 |
Python
|
UTF-8
|
Python
| false | false | 9,197 |
py
|
__author__ = 'thor'
"""
An illustration of various embeddings, based on Pedregosa, Grisel, Blondel, and Varoquaux's code
for the digits dataset. See http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold, datasets, decomposition, ensemble, lda, random_projection
def scatter_plot(X, y):
plt.scatter(X[:, 0], X[:, 1], c=y)
def analyze(X=None, y=None, plot_fun=scatter_plot, data_name='data'):
if X is None:
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plot_fun(X, y)
if title is not None:
plt.title(title)
# #----------------------------------------------------------------------
# # Scale and visualize the embedding vectors
# def plot_embedding(X, title=None):
# x_min, x_max = np.min(X, 0), np.max(X, 0)
# X = (X - x_min) / (x_max - x_min)
#
# plt.figure()
# ax = plt.subplot(111)
# for i in range(X.shape[0]):
# plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
# color=plt.cm.Set1(y[i] / 10.),
# fontdict={'weight': 'bold', 'size': 9})
#
# if hasattr(offsetbox, 'AnnotationBbox'):
# # only print thumbnails with matplotlib > 1.0
# shown_images = np.array([[1., 1.]]) # just something big
# for i in range(digits.data.shape[0]):
# dist = np.sum((X[i] - shown_images) ** 2, 1)
# if np.min(dist) < 4e-3:
# # don't show points that are too close
# continue
# shown_images = np.r_[shown_images, [X[i]]]
# imagebox = offsetbox.AnnotationBbox(
# offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
# X[i])
# ax.add_artist(imagebox)
# plt.xticks([]), plt.yticks([])
# if title is not None:
# plt.title(title)
#
#
# #----------------------------------------------------------------------
# # Plot images of the digits
# n_img_per_row = 20
# img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
# for i in range(n_img_per_row):
# ix = 10 * i + 1
# for j in range(n_img_per_row):
# iy = 10 * j + 1
# img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
# plt.imshow(img, cmap=plt.cm.binary)
# plt.xticks([])
# plt.yticks([])
# plt.title('A selection from the 64-dimensional digits dataset')
# ----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print('Computing random projection')
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, 'Random Projection of the {}'.format(data_name))
# ----------------------------------------------------------------------
# Projection on to the first 2 principal components
print('Computing PCA projection')
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(
X_pca,
'Principal Components projection of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print('Computing LDA projection')
X2 = X.copy()
X2.flat[:: X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(
X_lda,
'Linear Discriminant projection of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# Isomap projection of the dataset
print('Computing Isomap embedding')
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print('Done.')
plot_embedding(
X_iso,
'Isomap projection of the {} (time {:.2f})'.format(data_name, time() - t0),
)
# ----------------------------------------------------------------------
# Locally linear embedding of the dataset
print('Computing LLE embedding')
clf = manifold.LocallyLinearEmbedding(
n_neighbors, n_components=2, method='standard'
)
t0 = time()
X_lle = clf.fit_transform(X)
print(('Done. Reconstruction error: %g' % clf.reconstruction_error_))
plot_embedding(
X_lle,
'Locally Linear Embedding of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# Modified Locally linear embedding of the dataset
print('Computing modified LLE embedding')
clf = manifold.LocallyLinearEmbedding(
n_neighbors, n_components=2, method='modified'
)
t0 = time()
X_mlle = clf.fit_transform(X)
print(('Done. Reconstruction error: %g' % clf.reconstruction_error_))
plot_embedding(
X_mlle,
'Modified Locally Linear Embedding of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# HLLE embedding of the dataset
print('Computing Hessian LLE embedding')
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print(('Done. Reconstruction error: %g' % clf.reconstruction_error_))
plot_embedding(
X_hlle,
'Hessian Locally Linear Embedding of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# LTSA embedding of the dataset
print('Computing LTSA embedding')
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print(('Done. Reconstruction error: %g' % clf.reconstruction_error_))
plot_embedding(
X_ltsa,
'Local Tangent Space Alignment of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# MDS embedding of the dataset
print('Computing MDS embedding')
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print(('Done. Stress: %f' % clf.stress_))
plot_embedding(
X_mds, 'MDS embedding of the {} (time {:.2f})'.format(data_name, time() - t0)
)
# ----------------------------------------------------------------------
# Random Trees embedding of the dataset
print('Computing Totally Random Trees embedding')
hasher = ensemble.RandomTreesEmbedding(
n_estimators=200, random_state=0, max_depth=5
)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(
X_reduced,
'Random forest embedding of the {} (time {:.2f})'.format(
data_name, time() - t0
),
)
# ----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print('Computing Spectral embedding')
embedder = manifold.SpectralEmbedding(
n_components=2, random_state=0, eigen_solver='arpack'
)
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(
X_se,
'Spectral embedding of the {} (time {:.2f})'.format(data_name, time() - t0),
)
# ----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print('Computing t-SNE embedding')
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(
X_tsne, 't-SNE embedding of the {} (time {:.2f})'.format(data_name, time() - t0)
)
plt.show()
|
[
"[email protected]"
] | |
3b3c4b37598cbee3b7cd1bdeb617baa477c1b5b0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_285/ch63_2019_09_04_16_30_36_096314.py
|
de45a44ec20c2bdaed00b6cd7662603836b4dfb8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
def pos_arroba(email):
    # return the index of the first "@" in the address (None if absent)
    for i in range(len(email)):
        if email[i] == "@":
            return i
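# e.g. pos_arroba("user@example.com") returns 4 (hypothetical address for illustration)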
|
[
"[email protected]"
] | |
ddba12613afd79a75b8623640c9173ad91938b85
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-mgmt-resource/azure/mgmt/resource/policy/operations/policy_definitions_operations.py
|
d3be1bc6002c5d4c4415488a8d18e752af0343d8
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873 | 2016-10-26T21:35:20 | 2016-10-26T21:35:20 | 72,448,760 | 1 | 0 |
MIT
| 2023-05-04T17:15:01 | 2016-10-31T15:14:09 |
Python
|
UTF-8
|
Python
| false | false | 12,400 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class PolicyDefinitionsOperations(object):
"""PolicyDefinitionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update(
self, policy_definition_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update a policy definition.
:param policy_definition_name: The policy definition name.
:type policy_definition_name: str
:param parameters: The policy definition properties.
:type parameters: :class:`PolicyDefinition
<azure.mgmt.resource.policy.models.PolicyDefinition>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyDefinition
<azure.mgmt.resource.policy.models.PolicyDefinition>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyDefinition')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the policy definition.
:param policy_definition_name: The policy definition name.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
"""Gets the policy definition.
:param policy_definition_name: The policy definition name.
:type policy_definition_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyDefinition
<azure.mgmt.resource.policy.models.PolicyDefinition>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'
path_format_arguments = {
'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyDefinition', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the policy definitions of a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyDefinitionPaged
<azure.mgmt.resource.policy.models.PolicyDefinitionPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
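            # first request builds the URL from scratch; later pages follow the server-supplied next_link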
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
[
"[email protected]"
] | |
a905870f281dc4a19eaf29a250bc1be566801d46
|
bdccb54daf0d0b0a19fabfe9ea9b90fcfc1bdfbf
|
/Language Proficiency/Python/Math/Polar Coordinates/polar_coordinates.py
|
b946928570014778c1eaafad4530b5403724398a
|
[
"MIT"
] |
permissive
|
xuedong/hacker-rank
|
aba1ad8587bc88efda1e90d7ecfef8dbd74ccd68
|
1ee76899d555850a257a7d3000d8c2be78339dc9
|
refs/heads/master
| 2022-08-08T07:43:26.633759 | 2022-07-16T11:02:27 | 2022-07-16T11:02:27 | 120,025,883 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 119 |
py
|
#!/usr/bin/env python3
import cmath
if __name__ == "__main__":
    # cmath.polar(z) returns (r, phi); modulus and phase are printed on separate lines
    print(*cmath.polar(complex(input())), sep="\n")
|
[
"[email protected]"
] | |
34d0876905040aa34d7ff199773c19627d21f202
|
61b475c33745dbe11d88ea288cbdee279f89c610
|
/src/izi/apps/analytics/models.py
|
881f973e80cc48cc81f2c745f541d7868235c9ee
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
izi-ecommerce/izi-core
|
a092ea285d0dbd83d17427de3157a9f1e77d6c51
|
21176be2d41f0cf54ca954f294209c585f643dba
|
refs/heads/master
| 2020-03-30T08:37:39.045514 | 2018-10-08T02:58:46 | 2018-10-08T02:58:46 | 151,029,291 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 824 |
py
|
from izi.apps.analytics.abstract_models import (
AbstractProductRecord, AbstractUserProductView,
AbstractUserRecord, AbstractUserSearch)
from izi.core.loading import is_model_registered
__all__ = []
if not is_model_registered('analytics', 'ProductRecord'):
class ProductRecord(AbstractProductRecord):
pass
__all__.append('ProductRecord')
if not is_model_registered('analytics', 'UserRecord'):
class UserRecord(AbstractUserRecord):
pass
__all__.append('UserRecord')
if not is_model_registered('analytics', 'UserProductView'):
class UserProductView(AbstractUserProductView):
pass
__all__.append('UserProductView')
if not is_model_registered('analytics', 'UserSearch'):
class UserSearch(AbstractUserSearch):
pass
__all__.append('UserSearch')
|
[
"[email protected]"
] | |
8b0779994b8a00dc87933b5788fc4e363116d694
|
67583749bab9e87fe2b890dd0aee2c09e8f4bbfb
|
/yandex-algorithm-training/dz5-b-e.py
|
857af6911b1a88bdcb9e2b9def7ab77864cb8e69
|
[] |
no_license
|
allburov/codeforces
|
41e6d0f5155a0ee635d864053aad996b5880aabe
|
e7455f9fc85e66988b2a195234efdec155c5f8a6
|
refs/heads/master
| 2023-04-13T07:28:00.920469 | 2023-04-06T11:53:01 | 2023-04-06T11:53:01 | 160,622,387 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,360 |
py
|
def read_input():
with open('threesum.in') as input_:
s = int(input_.readline())
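        # each numbers line starts with its element count; the leading count is discarded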
_, *num1 = list(map(int, input_.readline().split()))
_, *num2 = list(map(int, input_.readline().split()))
_, *num3 = list(map(int, input_.readline().split()))
return s, num1, num2, num3
def findMiddle(numsA, numsB, S):
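    # two-pointer scan over the two sorted (value, index) lists for a pair summing to S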
nA = len(numsA)
nB = len(numsB)
i, j = 0, nB - 1
answers = []
while i < nA and j >= 0:
sumN = numsA[i][0] + numsB[j][0]
if sumN == S:
answers.append((numsA[i][1], numsB[j][1]))
j-=1
elif sumN < S:
i += 1
else:
j -= 1
if not answers:
return None
answers.sort()
return answers[0]
# num, pos
def task(s, nums1, nums2, nums3):
minSum = min(nums2) + min(nums3)
maxSum = max(nums2) + max(nums3)
nums2 = [(num, i) for i, num in enumerate(nums2)]
nums2.sort()
nums3 = [(num, i) for i, num in enumerate(nums3)]
nums3.sort()
for i, num in enumerate(nums1):
find = s - num
if find > maxSum or find < minSum:
continue
res = findMiddle(nums2, nums3, S=s - num)
if res:
return i, res[0], res[1]
return [-1]
if __name__ == "__main__":
args = read_input()
res = task(*args)
print(" ".join(map(str, res)))
|
[
"[email protected]"
] | |
d45c49565c362994196ebddf86e40e55475ec38f
|
21e1d00c48c1732cc44af077572299831b93ffc2
|
/DATA_SCIENCE/NumPy/NUM-PY_02/Array/Ravel.py
|
3bfb3c88cdf34985886d177ec21e60040c816a27
|
[] |
no_license
|
GolamRabbani20/PYTHON-A2Z
|
7be72041407e4417359b3a610ced0919f3939993
|
7c89223f253aa559fa15caacb89c68e0b78ff915
|
refs/heads/master
| 2023-05-09T00:43:03.012963 | 2021-05-26T07:56:56 | 2021-05-26T07:56:56 | 317,953,879 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
import numpy as np
x = np.array([[1,2],[3,4],[5,6]])
print(x.ravel())  # ravel() returns a flattened 1-D view; x itself is unchanged
print(x)
print(x.min())
|
[
"[email protected]"
] | |
1386d20539b4efddf945c6345a59b476a6126afa
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/read_pyexcal_20201111164148.py
|
ea7974f00258d36f2ceeacd3dc9d4247825542bd
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 165 |
py
|
# interactive prompts (>>>) stripped so the snippet runs as a plain script
from openpyxl import load_workbook
wb = load_workbook(filename='empty_book.xlsx')
sheet_ranges = wb['range names']
print(sheet_ranges['D18'].value)
|
[
"[email protected]"
] | |
334a1f105f8d090005013ca75b048975e5761708
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/list_resource_resp.py
|
a4d35a9e336cac44e1b8ff9b5883573a8a79921f
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 6,036 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListResourceResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_detail': 'object',
'resource_id': 'str',
'resource_name': 'str',
'tags': 'list[ResourceTagResp]'
}
attribute_map = {
'resource_detail': 'resource_detail',
'resource_id': 'resource_id',
'resource_name': 'resource_name',
'tags': 'tags'
}
def __init__(self, resource_detail=None, resource_id=None, resource_name=None, tags=None):
"""ListResourceResp
The model defined in huaweicloud sdk
        :param resource_detail: Resource details. A resource object reserved for extension; empty by default.
        :type resource_detail: object
        :param resource_id: Resource ID
        :type resource_id: str
        :param resource_name: Resource name; an empty string when absent
        :type resource_name: str
        :param tags: Tag list; an empty array when there are no tags
        :type tags: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
self._resource_detail = None
self._resource_id = None
self._resource_name = None
self._tags = None
self.discriminator = None
if resource_detail is not None:
self.resource_detail = resource_detail
if resource_id is not None:
self.resource_id = resource_id
if resource_name is not None:
self.resource_name = resource_name
if tags is not None:
self.tags = tags
@property
def resource_detail(self):
"""Gets the resource_detail of this ListResourceResp.
        Resource details. A resource object reserved for extension; empty by default.
:return: The resource_detail of this ListResourceResp.
:rtype: object
"""
return self._resource_detail
@resource_detail.setter
def resource_detail(self, resource_detail):
"""Sets the resource_detail of this ListResourceResp.
        Resource details. A resource object reserved for extension; empty by default.
:param resource_detail: The resource_detail of this ListResourceResp.
:type resource_detail: object
"""
self._resource_detail = resource_detail
@property
def resource_id(self):
"""Gets the resource_id of this ListResourceResp.
        Resource ID
:return: The resource_id of this ListResourceResp.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ListResourceResp.
        Resource ID
:param resource_id: The resource_id of this ListResourceResp.
:type resource_id: str
"""
self._resource_id = resource_id
@property
def resource_name(self):
"""Gets the resource_name of this ListResourceResp.
        Resource name; an empty string when absent
:return: The resource_name of this ListResourceResp.
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""Sets the resource_name of this ListResourceResp.
        Resource name; an empty string when absent
:param resource_name: The resource_name of this ListResourceResp.
:type resource_name: str
"""
self._resource_name = resource_name
@property
def tags(self):
"""Gets the tags of this ListResourceResp.
        Tag list; an empty array when there are no tags
:return: The tags of this ListResourceResp.
:rtype: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListResourceResp.
        Tag list; an empty array when there are no tags
:param tags: The tags of this ListResourceResp.
:type tags: list[:class:`huaweicloudsdkeip.v2.ResourceTagResp`]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListResourceResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
9f804e8a2a3a3536690acaf7823502557f6006c1
|
28eafbc96006c307d4c7994908cb8caf0e37e69c
|
/Python_recap/Unpacking_keyword_arguments.py
|
98b9cfa033b35331a7e8760e76b077d917b714eb
|
[] |
no_license
|
mikaelbeat/The_Complete_Python_and_PostgreSQL_Developer_Course
|
aa2f3369c78a92a74cb71ec701e7b1b83e0c41a8
|
b731bd2c1b3202c1fadf61e6b5516d84fc354fbb
|
refs/heads/master
| 2022-12-12T05:10:47.327480 | 2020-09-14T07:28:19 | 2020-09-14T07:28:19 | 265,159,279 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 97 |
py
|
def named(**kwargs):
print(kwargs)
details = {"name": "Bob", "age": 25}
named(**details)
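# prints {'name': 'Bob', 'age': 25} -- **details unpacks the dict into keyword arguments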
|
[
"[email protected]"
] | |
1abee88f62ba11f1b8ce3c0bc2b45fa598623743
|
228ebc9fb20f25dd3ed2a6959aac41fd31314e64
|
/samples/model-builder/predict_text_entity_extraction_sample_test.py
|
3b123ff148acf504c79c9b0b9e72bba761b9628e
|
[
"Apache-2.0"
] |
permissive
|
orionnye/python-aiplatform
|
746e3df0c75025582af38223829faeb2656dc653
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
refs/heads/main
| 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 |
Apache-2.0
| 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null |
UTF-8
|
Python
| false | false | 1,184 |
py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import predict_text_entity_extraction_sample
import test_constants as constants
def test_predict_text_entity_extraction_sample(mock_sdk_init, mock_get_endpoint):
predict_text_entity_extraction_sample.predict_text_entity_extraction_sample(
project=constants.PROJECT,
location=constants.LOCATION,
endpoint_id=constants.ENDPOINT_NAME,
content=constants.PREDICTION_TEXT_INSTANCE,
)
mock_sdk_init.assert_called_once_with(
project=constants.PROJECT, location=constants.LOCATION
)
mock_get_endpoint.assert_called_once_with(constants.ENDPOINT_NAME,)
|
[
"[email protected]"
] | |
4ee899fc7c2d5bbf7189cb564d06e4bb4594a523
|
41898ee4cc597a19540d64c333687610d5792168
|
/Problems/1200. Minimum Absolute Difference.py
|
889e116531cce0b35ad923e6d519e77d91b0f6d4
|
[] |
no_license
|
aidardarmesh/leetcode
|
82c4e09a85dc5b6cf05bceb089b57b3a81e2406e
|
4509f4b2b83e172e6ccc21ff89fc1204e0c6b3f3
|
refs/heads/master
| 2021-07-06T15:56:04.244369 | 2020-11-15T20:47:16 | 2020-11-15T20:47:16 | 205,086,346 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
from typing import *
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
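        # after sorting, minimum-difference pairs are always adjacent, so one linear scan suffices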
min_delta = 9999999
result = []
n = len(arr)
arr = sorted(arr)
for i in range(0, n-1):
delta = arr[i+1] - arr[i]
if delta < min_delta:
result = []
min_delta = delta
if delta == min_delta:
result.append([arr[i], arr[i+1]])
return result
s = Solution()
assert s.minimumAbsDifference([4,2,1,3]) == [[1,2],[2,3],[3,4]]
assert s.minimumAbsDifference([1,3,6,10,15]) == [[1,3]]
assert s.minimumAbsDifference([3,8,-10,23,19,-4,-14,27]) == [[-14,-10],[19,23],[23,27]]
|
[
"[email protected]"
] | |
49367f0e5d32687bdb5d111a90ac1d482dacc060
|
6121da376efe804fc8d9a5b33731c7c35f6d5fc0
|
/python_basics/ex35.py
|
f0442bfb4afd630f3417b06b74b0aadd4bcdfe64
|
[] |
no_license
|
Gitus-Maximus/Skills
|
4e67b5cdc19d695aef0ab1f768d9ab5c2a9591ac
|
1ba6bd63de18afe2ca698430aaa4b5bd5434351b
|
refs/heads/main
| 2023-04-30T18:35:31.654718 | 2021-05-22T11:56:06 | 2021-05-22T11:56:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
py
|
week = ["Poniedziałek", "Wtorek", "Środa", "Czwartek", "Piątek", "Sobota", "Niedziela"]
print("$".join (week[1:3])) #metoda .join sprawia, ze mozemy wyświetlić dowolony tekst pomiędzy argumentami z listy.
print("Pozbywam się:", week.pop(4))
print(week)
|
[
"[email protected]"
] | |
a30bc5873f3cbe1ae76f71b47f868d176dd3823c
|
998a965258a75e4a9d48805d7b2873afafae2f7d
|
/dpoll/polls/migrations/0012_auto_20190125_0654.py
|
1458061d046401d3c9263a40b9998a90ad538f70
|
[
"MIT"
] |
permissive
|
emre/dpoll.xyz
|
af64cb7d933c579d9cb8720e456fa3d3b7ae8d5e
|
15927cb82bc525f99068a0ab92c14087e88f7950
|
refs/heads/master
| 2022-12-12T18:35:56.351060 | 2022-04-24T22:11:36 | 2022-04-24T22:11:36 | 149,586,879 | 23 | 9 |
MIT
| 2021-06-10T20:48:55 | 2018-09-20T09:42:45 |
Python
|
UTF-8
|
Python
| false | false | 449 |
py
|
# Generated by Django 2.1.1 on 2019-01-25 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0011_auto_20190125_0649'),
]
operations = [
migrations.AlterField(
model_name='promotiontransaction',
name='from_user',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
fcf15e9c3d995c4b3e8db9f4b5975fb0ed8484a7
|
240dc81851dd0243c0b14511f6d8b563ab91c890
|
/libs/reportlab/graphics/charts/barcharts.py
|
a31a66ceebf7b9c44c0fe6cb90ea077b3e6297ee
|
[] |
no_license
|
prcek/TSReg
|
0aac7ffc7992b731d12dc3959d661bc8c3639744
|
ea6eac514d8e783ddaeeed6181b9ab45d5673c05
|
refs/heads/master
| 2020-05-30T03:19:46.737202 | 2017-06-08T08:14:00 | 2017-06-08T08:14:00 | 2,208,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 58,608 |
py
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/barcharts.py
__version__=''' $Id$ '''
__doc__="""This module defines a variety of Bar Chart components.
The basic flavors are stacked and side-by-side, available in horizontal and
vertical versions.
"""
import copy
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColor, isColorOrNone, isString,\
isListOfStrings, SequenceOf, isBoolean, isNoneOrShape, isStringOrNone,\
NoneOr, isListOfNumbersOrNone, EitherOr, OneOf
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol
from reportlab.lib.formatters import Formatter
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.shapes import Line, Rect, Group, Drawing, NotImplementedError
from reportlab.graphics.charts.axes import XCategoryAxis, YValueAxis, YCategoryAxis, XValueAxis
from reportlab.graphics.charts.textlabels import BarChartLabel, NA_Label, NoneOrInstanceOfNA_Label
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
class BarChartProperties(PropHolder):
_attrMap = AttrMap(
strokeColor = AttrMapValue(isColorOrNone, desc='Color of the bar border.'),
fillColor = AttrMapValue(isColorOrNone, desc='Color of the bar interior area.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of the bar border.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'),
symbol = AttrMapValue(None, desc='A widget to be used instead of a normal bar.',advancedUsage=1),
name = AttrMapValue(isString, desc='Text to be associated with a bar (eg seriesname)'),
swatchMarker = AttrMapValue(NoneOr(isSymbol), desc="None or makeMarker('Diamond') ...",advancedUsage=1),
)
def __init__(self):
self.strokeColor = None
self.fillColor = colors.blue
self.strokeWidth = 0.5
self.symbol = None
self.strokeDashArray = None
# Bar chart classes.
class BarChart(PlotArea):
"Abstract base class, unusable by itself."
_attrMap = AttrMap(BASE=PlotArea,
useAbsolute = AttrMapValue(EitherOr((isBoolean,EitherOr((isString,isNumber)))), desc='Flag to use absolute spacing values; use string of gsb for finer control\n(g=groupSpacing,s=barSpacing,b=barWidth). ',advancedUsage=1),
barWidth = AttrMapValue(isNumber, desc='The width of an individual bar.'),
groupSpacing = AttrMapValue(isNumber, desc='Width between groups of bars.'),
barSpacing = AttrMapValue(isNumber, desc='Width between individual bars.'),
bars = AttrMapValue(None, desc='Handle of the individual bars.'),
valueAxis = AttrMapValue(None, desc='Handle of the value axis.'),
categoryAxis = AttrMapValue(None, desc='Handle of the category axis.'),
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
barLabels = AttrMapValue(None, desc='Handle to the list of bar labels.'),
barLabelFormat = AttrMapValue(None, desc='Formatting string or function used for bar labels.'),
barLabelCallOut = AttrMapValue(None, desc='Callout function(label)\nlabel._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)',advancedUsage=1),
barLabelArray = AttrMapValue(None, desc='explicit array of bar label values, must match size of data if present.'),
reversePlotOrder = AttrMapValue(isBoolean, desc='If true, reverse common category plot order.',advancedUsage=1),
naLabel = AttrMapValue(NoneOrInstanceOfNA_Label, desc='Label to use for N/A values.',advancedUsage=1),
annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.'),
categoryLabelBarSize = AttrMapValue(isNumber, desc='width to leave for a category label to go between categories.'),
categoryLabelBarOrder = AttrMapValue(OneOf('first','last','auto'), desc='where any label bar should appear first/last'),
barRecord = AttrMapValue(None, desc='callable(bar,label=labelText,value=value,**kwds) to record bar information', advancedUsage=1),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.bars
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(style, 'strokeWidth',None))
swatchMarker = getattr(style, 'swatchMarker', getattr(baseStyle, 'swatchMarker',None))
if swatchMarker:
return uSymbol2Symbol(swatchMarker,x+width/2.,y+height/2.,fillColor)
return Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.bars[i],'name',default))
def __init__(self):
assert self.__class__.__name__ not in ('BarChart','BarChart3D'), 'Abstract Class %s Instantiated' % self.__class__.__name__
if self._flipXY:
self.categoryAxis = YCategoryAxis()
self.valueAxis = XValueAxis()
else:
self.categoryAxis = XCategoryAxis()
self.valueAxis = YValueAxis()
PlotArea.__init__(self)
self.barSpacing = 0
self.reversePlotOrder = 0
# this defines two series of 3 points. Just an example.
self.data = [(100,110,120,130),
(70, 80, 85, 90)]
# control bar spacing. is useAbsolute = 1 then
# the next parameters are in points; otherwise
# they are 'proportions' and are normalized to
# fit the available space. Half a barSpacing
# is allocated at the beginning and end of the
# chart.
self.useAbsolute = 0 #- not done yet
self.barWidth = 10
self.groupSpacing = 5
self.barSpacing = 0
self.barLabels = TypedPropertyCollection(BarChartLabel)
self.barLabels.boxAnchor = 'c'
self.barLabels.textAnchor = 'middle'
self.barLabelFormat = None
self.barLabelArray = None
# this says whether the origin is inside or outside
# the bar - +10 means put the origin ten points
# above the tip of the bar if value > 0, or ten
# points inside if bar value < 0. This is different
# to label dx/dy which are not dependent on the
# sign of the data.
self.barLabels.nudge = 0
# if you have multiple series, by default they butt
# together.
# we really need some well-designed default lists of
# colors e.g. from Tufte. These will be used in a
# cycle to set the fill color of each series.
self.bars = TypedPropertyCollection(BarChartProperties)
self.bars.strokeWidth = 1
self.bars.strokeColor = colors.black
self.bars.strokeDashArray = None
self.bars[0].fillColor = colors.red
self.bars[1].fillColor = colors.green
self.bars[2].fillColor = colors.blue
self.naLabel = None #NA_Label()
def demo(self):
"""Shows basic use of a bar chart"""
if self.__class__.__name__=='BarChart':
raise NotImplementedError, 'Abstract Class BarChart has no demo'
drawing = Drawing(200, 100)
bc = self.__class__()
drawing.add(bc)
return drawing
def _getConfigureData(self):
cA = self.categoryAxis
data = self.data
if cA.style not in ('parallel','parallel_3d'):
_data = data
data = max(map(len,_data))*[0]
for d in _data:
for i in xrange(len(d)):
data[i] = data[i] + (d[i] or 0)
data = list(_data) + [data]
self._configureData = data
def _getMinMax(self):
'''Attempt to return the data range'''
self._getConfigureData()
self.valueAxis._setRange(self._configureData)
return self.valueAxis._valueMin, self.valueAxis._valueMax
def _drawBegin(self,org,length):
'''Position and configure value axis, return crossing value'''
vA = self.valueAxis
vA.setPosition(self.x, self.y, length)
self._getConfigureData()
vA.configure(self._configureData)
# if zero is in chart, put the other axis there, otherwise use low
crossesAt = vA.scale(0)
if crossesAt > org+length or crossesAt<org:
crossesAt = org
return crossesAt
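        # e.g. with valueMin=-10, valueMax=60 the crossing axis is placed at
        # the coordinate of value 0; with valueMin=10 the zero crossing falls
        # outside the axis, so it snaps back to org, the axis origin.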
def _drawFinish(self):
'''finalize the drawing of a barchart'''
cA = self.categoryAxis
vA = self.valueAxis
cA.configure(self._configureData)
self.calcBarPositions()
g = Group()
g.add(self.makeBackground())
cAdgl = getattr(cA,'drawGridLast',False)
vAdgl = getattr(vA,'drawGridLast',False)
if not cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if not vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
g.add(self.makeBars())
g.add(cA)
g.add(vA)
if cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
del self._configureData
return g
def calcBarPositions(self):
"""Works out where they go. default vertical.
Sets an attribute _barPositions which is a list of
lists of (x, y, width, height) matching the data.
"""
flipXY = self._flipXY
if flipXY:
org = self.y
else:
org = self.x
cA = self.categoryAxis
cScale = cA.scale
data = self.data
seriesCount = self._seriesCount = len(data)
self._rowLength = rowLength = max(map(len,data))
wG = self.groupSpacing
barSpacing = self.barSpacing
barWidth = self.barWidth
clbs = getattr(self,'categoryLabelBarSize',0)
clbo = getattr(self,'categoryLabelBarOrder','auto')
if clbo=='auto': clbo = flipXY and 'last' or 'first'
clbo = clbo=='first'
style = cA.style
if style=='parallel':
wB = seriesCount*barWidth
wS = (seriesCount-1)*barSpacing
bGapB = barWidth
bGapS = barSpacing
else:
accum = rowLength*[0]
wB = barWidth
wS = bGapB = bGapS = 0
self._groupWidth = groupWidth = wG+wB+wS
useAbsolute = self.useAbsolute
if useAbsolute:
if not isinstance(useAbsolute,str):
useAbsolute = 7 #all three are fixed
else:
useAbsolute = 0 + 1*('b' in useAbsolute)+2*('g' in useAbsolute)+4*('s' in useAbsolute)
else:
useAbsolute = 0
aW0 = float(cScale(0)[1])
aW = aW0 - clbs
if useAbsolute==0: #case 0 all are free
self._normFactor = fB = fG = fS = aW/groupWidth
elif useAbsolute==7: #all fixed
fB = fG = fS = 1.0
_cscale = cA._scale
elif useAbsolute==1: #case 1 barWidth is fixed
fB = 1.0
fG = fS = (aW-wB)/(wG+wS)
elif useAbsolute==2: #groupspacing is fixed
fG=1.0
fB = fS = (aW-wG)/(wB+wS)
elif useAbsolute==3: #groupspacing & barwidth are fixed
fB = fG = 1.0
fS = (aW-wG-wB)/wS
elif useAbsolute==4: #barspacing is fixed
fS=1.0
fG = fB = (aW-wS)/(wG+wB)
elif useAbsolute==5: #barspacing & barWidth are fixed
fS = fB = 1.0
fG = (aW-wB-wS)/wG
elif useAbsolute==6: #barspacing & groupspacing are fixed
fS = fG = 1
fB = (aW-wS-wG)/wB
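        # Worked example (illustrative, not from the original source): two
        # parallel series with barWidth=10, barSpacing=0 and groupSpacing=5
        # give wB=20, wS=0, wG=5, so groupWidth=25. If the category slot is
        # aW=75 points and useAbsolute==0, then fB=fG=fS=75/25=3.0 and each
        # bar is drawn barWidth*fB = 30 points wide.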
self._normFactorB = fB
self._normFactorG = fG
self._normFactorS = fS
# 'Baseline' correction...
vA = self.valueAxis
vScale = vA.scale
vm, vM = vA._valueMin, vA._valueMax
if vm <= 0 <= vM:
baseLine = vScale(0)
elif 0 < vm:
baseLine = vScale(vm)
elif vM < 0:
baseLine = vScale(vM)
self._baseLine = baseLine
nC = max(map(len,data))
width = barWidth*fB
offs = 0.5*wG*fG
bGap = bGapB*fB+bGapS*fS
if clbs:
            if clbo: #the label bar comes first
lbpf = (offs+clbs/6.0)/aW0
offs += clbs
else:
lbpf = (offs+wB*fB+wS*fS+clbs/6.0)/aW0
cA.labels.labelPosFrac = lbpf
self._barPositions = []
reversePlotOrder = self.reversePlotOrder
for rowNo in xrange(seriesCount):
barRow = []
if reversePlotOrder:
xVal = seriesCount-1 - rowNo
else:
xVal = rowNo
xVal = offs + xVal*bGap
row = data[rowNo]
for colNo in xrange(nC):
datum = row[colNo]
                # locate this bar's anchor within the category slot
if useAbsolute==7:
x = groupWidth*_cscale(colNo) + xVal + org
else:
(g, _) = cScale(colNo)
x = g + xVal
if datum is None:
height = None
y = baseLine
else:
if style not in ('parallel','parallel_3d'):
y = vScale(accum[colNo])
if y<baseLine: y = baseLine
accum[colNo] = accum[colNo] + datum
datum = accum[colNo]
else:
y = baseLine
height = vScale(datum) - y
if -1e-8<height<=1e-8:
height = 1e-8
if datum<-1e-8: height = -1e-8
barRow.append(flipXY and (y,x,height,width) or (x,y,width,height))
self._barPositions.append(barRow)
def _getLabelText(self, rowNo, colNo):
'''return formatted label text'''
labelFmt = self.barLabelFormat
if labelFmt is None:
labelText = None
elif labelFmt == 'values':
labelText = self.barLabelArray[rowNo][colNo]
elif type(labelFmt) is str:
labelText = labelFmt % self.data[rowNo][colNo]
elif hasattr(labelFmt,'__call__'):
labelText = labelFmt(self.data[rowNo][colNo])
else:
msg = "Unknown formatter type %s, expected string or function" % labelFmt
raise Exception, msg
return labelText
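        # e.g. barLabelFormat='%0.2f' renders 2.4 as '2.40'; a callable such
        # as lambda v: '%d%%' % v renders it as '2%'; 'values' indexes into a
        # user-supplied self.barLabelArray instead.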
def _labelXY(self,label,x,y,width,height):
'Compute x, y for a label'
nudge = label.nudge
bt = getattr(label,'boxTarget','normal')
anti = bt=='anti'
if anti: nudge = -nudge
pm = value = height
if anti: value = 0
a = x + 0.5*width
nudge = (height>=0 and 1 or -1)*nudge
if bt=='hi':
if value>=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
elif bt=='lo':
if value<=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
else:
b = y + value + nudge
label._pmv = pm #the plus minus val
return a,b,pm
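        # pm ('plus/minus') records which side of the bar the label landed on;
        # _addLabel uses its sign to decide whether to mirror dx/dy and where
        # the optional leader line (fixedStart/fixedEnd) begins and ends.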
def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
text = self._getLabelText(rowNo,colNo)
if text:
self._addLabel(text, self.barLabels[(rowNo, colNo)], g, rowNo, colNo, x, y, width, height)
def _addNABarLabel(self, g, rowNo, colNo, x, y, width, height):
na = self.naLabel
if na and na.text:
na = copy.copy(na)
v = self.valueAxis._valueMax<=0 and -1e-8 or 1e-8
if width is None: width = v
if height is None: height = v
self._addLabel(na.text, na, g, rowNo, colNo, x, y, width, height)
def _addLabel(self, text, label, g, rowNo, colNo, x, y, width, height):
if label.visible:
labelWidth = stringWidth(text, label.fontName, label.fontSize)
flipXY = self._flipXY
if flipXY:
y0, x0, pm = self._labelXY(label,y,x,height,width)
else:
x0, y0, pm = self._labelXY(label,x,y,width,height)
fixedEnd = getattr(label,'fixedEnd', None)
if fixedEnd is not None:
v = fixedEnd._getValue(self,pm)
x00, y00 = x0, y0
if flipXY:
x0 = v
else:
y0 = v
else:
if flipXY:
x00 = x0
y00 = y+height/2.0
else:
x00 = x+width/2.0
y00 = y0
fixedStart = getattr(label,'fixedStart', None)
if fixedStart is not None:
v = fixedStart._getValue(self,pm)
if flipXY:
x00 = v
else:
y00 = v
if pm<0:
if flipXY:
dx = -2*label.dx
dy = 0
else:
dy = -2*label.dy
dx = 0
else:
dy = dx = 0
label.setOrigin(x0+dx, y0+dy)
label.setText(text)
sC, sW = label.lineStrokeColor, label.lineStrokeWidth
if sC and sW: g.insert(0,Line(x00,y00,x0,y0, strokeColor=sC, strokeWidth=sW))
g.add(label)
alx = getattr(self,'barLabelCallOut',None)
if alx:
label._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)
alx(label)
del label._callOutInfo
def _makeBar(self,g,x,y,width,height,rowNo,style):
r = Rect(x, y, width, height)
r.strokeWidth = style.strokeWidth
r.fillColor = style.fillColor
r.strokeColor = style.strokeColor
if style.strokeDashArray:
r.strokeDashArray = style.strokeDashArray
g.add(r)
def _makeBars(self,g,lg):
lenData = len(self.data)
bars = self.bars
br = getattr(self,'barRecord',None)
for rowNo in xrange(lenData):
row = self._barPositions[rowNo]
styleCount = len(bars)
styleIdx = rowNo % styleCount
rowStyle = bars[styleIdx]
for colNo in range(len(row)):
style = (styleIdx,colNo) in bars and bars[(styleIdx,colNo)] or rowStyle
(x, y, width, height) = row[colNo]
if None in (width,height):
self._addNABarLabel(lg,rowNo,colNo,x,y,width,height)
continue
# Draw a rectangular symbol for each data item,
# or a normal colored rectangle.
symbol = None
if hasattr(style, 'symbol'):
symbol = copy.deepcopy(style.symbol)
elif hasattr(self.bars, 'symbol'):
symbol = self.bars.symbol
if symbol:
symbol.x = x
symbol.y = y
symbol.width = width
symbol.height = height
g.add(symbol)
elif abs(width)>1e-7 and abs(height)>=1e-7 and (style.fillColor is not None or style.strokeColor is not None):
self._makeBar(g,x,y,width,height,rowNo,style)
if br: br(g.contents[-1],label=self._getLabelText(rowNo,colNo),value=self.data[rowNo][colNo],rowNo=rowNo,colNo=colNo)
self._addBarLabel(lg,rowNo,colNo,x,y,width,height)
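    # _computeLabelPosition repeats the geometry of _addLabel without drawing
    # anything, so callers can measure a label's bounds before committing to it.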
def _computeLabelPosition(self, text, label, rowNo, colNo, x, y, width, height):
if label.visible:
labelWidth = stringWidth(text, label.fontName, label.fontSize)
flipXY = self._flipXY
if flipXY:
y0, x0, pm = self._labelXY(label,y,x,height,width)
else:
x0, y0, pm = self._labelXY(label,x,y,width,height)
fixedEnd = getattr(label,'fixedEnd', None)
if fixedEnd is not None:
v = fixedEnd._getValue(self,pm)
x00, y00 = x0, y0
if flipXY:
x0 = v
else:
y0 = v
else:
if flipXY:
x00 = x0
y00 = y+height/2.0
else:
x00 = x+width/2.0
y00 = y0
fixedStart = getattr(label,'fixedStart', None)
if fixedStart is not None:
v = fixedStart._getValue(self,pm)
if flipXY:
x00 = v
else:
y00 = v
if pm<0:
if flipXY:
dx = -2*label.dx
dy = 0
else:
dy = -2*label.dy
dx = 0
else:
dy = dx = 0
label.setOrigin(x0+dx, y0+dy)
label.setText(text)
return pm,label.getBounds()
def _computeSimpleBarLabelPositions(self):
"""Information function, can be called by charts which want to mess with labels"""
cA, vA = self.categoryAxis, self.valueAxis
if vA: ovAjA, vA.joinAxis = vA.joinAxis, cA
if cA: ocAjA, cA.joinAxis = cA.joinAxis, vA
if self._flipXY:
cA.setPosition(self._drawBegin(self.x,self.width), self.y, self.height)
else:
cA.setPosition(self.x, self._drawBegin(self.y,self.height), self.width)
cA.configure(self._configureData)
self.calcBarPositions()
lenData = len(self.data)
bars = self.bars
R = [].append
for rowNo in xrange(lenData):
row = self._barPositions[rowNo]
C = [].append
for colNo in range(len(row)):
x, y, width, height = row[colNo]
if None in (width,height):
na = self.naLabel
if na and na.text:
na = copy.copy(na)
v = self.valueAxis._valueMax<=0 and -1e-8 or 1e-8
if width is None: width = v
if height is None: height = v
C(self._computeLabelPosition(na.text, na, rowNo, colNo, x, y, width, height))
else:
C(None)
else:
text = self._getLabelText(rowNo,colNo)
if text:
C(self._computeLabelPosition(text, self.barLabels[(rowNo, colNo)], rowNo, colNo, x, y, width, height))
else:
C(None)
R(C.__self__)
return R.__self__
def makeBars(self):
g = Group()
lg = Group()
self._makeBars(g,lg)
g.add(lg)
return g
def _desiredCategoryAxisLength(self):
'''for dynamically computing the desired category axis length'''
style = self.categoryAxis.style
data = self.data
n = len(data)
m = max(map(len,data))
if style=='parallel':
groupWidth = (n-1)*self.barSpacing+n*self.barWidth
else:
groupWidth = self.barWidth
return m*(self.groupSpacing+groupWidth)
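        # e.g. two parallel series over 4 categories with barWidth=10,
        # barSpacing=0 and groupSpacing=5: groupWidth = 1*0 + 2*10 = 20,
        # so the desired length is 4*(5+20) = 100 points.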
def draw(self):
cA, vA = self.categoryAxis, self.valueAxis
if vA: ovAjA, vA.joinAxis = vA.joinAxis, cA
if cA: ocAjA, cA.joinAxis = cA.joinAxis, vA
if self._flipXY:
cA.setPosition(self._drawBegin(self.x,self.width), self.y, self.height)
else:
cA.setPosition(self.x, self._drawBegin(self.y,self.height), self.width)
return self._drawFinish()
class VerticalBarChart(BarChart):
"Vertical bar chart with multiple side-by-side bars."
_flipXY = 0
class HorizontalBarChart(BarChart):
"Horizontal bar chart with multiple side-by-side bars."
_flipXY = 1
class _FakeGroup:
def __init__(self, cmp=None):
self._data = []
self._cmp = cmp
def add(self,what):
self._data.append(what)
def value(self):
return self._data
def sort(self):
self._data.sort(self._cmp)
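# _FakeGroup collects the record tuples emitted by the BarChart3D overrides
# below (tag 0 = bar face, tag 1 = bar label) and sorts them with a Python 2
# cmp function so that faces and labels are painted back to front.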
class BarChart3D(BarChart):
_attrMap = AttrMap(BASE=BarChart,
theta_x = AttrMapValue(isNumber, desc='dx/dz'),
theta_y = AttrMapValue(isNumber, desc='dy/dz'),
zDepth = AttrMapValue(isNumber, desc='depth of an individual series'),
zSpace = AttrMapValue(isNumber, desc='z gap around series'),
)
theta_x = .5
theta_y = .5
zDepth = None
zSpace = None
def calcBarPositions(self):
BarChart.calcBarPositions(self)
seriesCount = self._seriesCount
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
if self.categoryAxis.style=='parallel_3d':
_3d_depth = seriesCount*zDepth+(seriesCount+1)*zSpace
else:
_3d_depth = zDepth + 2*zSpace
_3d_depth *= self._normFactor
self._3d_dx = self.theta_x*_3d_depth
self._3d_dy = self.theta_y*_3d_depth
def _calc_z0(self,rowNo):
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
if self.categoryAxis.style=='parallel_3d':
z0 = self._normFactor*(rowNo*(zDepth+zSpace)+zSpace)
else:
z0 = self._normFactor*zSpace
return z0
def _makeBar(self,g,x,y,width,height,rowNo,style):
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
z0 = self._calc_z0(rowNo)
z1 = z0 + zDepth*self._normFactor
if width<0:
x += width
width = -width
x += z0*self.theta_x
y += z0*self.theta_y
if self._flipXY:
y += zSpace
else:
x += zSpace
g.add((0,z0,z1,x,y,width,height,rowNo,style))
def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
z0 = self._calc_z0(rowNo)
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
z1 = z0
x += z0*self.theta_x
y += z0*self.theta_y
if self._flipXY:
y += zSpace
else:
x += zSpace
g.add((1,z0,z1,x,y,width,height,rowNo,colNo))
def makeBars(self):
from utils3d import _draw_3d_bar
fg = _FakeGroup(cmp=self._cmpZ)
self._makeBars(fg,fg)
fg.sort()
g = Group()
theta_x = self.theta_x
theta_y = self.theta_y
        if self.categoryAxis.style == 'stacked':
            fg.value().reverse()    # reverse in place; list.reverse() returns None
for t in fg.value():
if t[0]==0:
z0,z1,x,y,width,height,rowNo,style = t[1:]
dz = z1 - z0
_draw_3d_bar(g, x, x+width, y, y+height, dz*theta_x, dz*theta_y,
fillColor=style.fillColor, fillColorShaded=None,
strokeColor=style.strokeColor, strokeWidth=style.strokeWidth,
shading=0.45)
for t in fg.value():
if t[0]==1:
z0,z1,x,y,width,height,rowNo,colNo = t[1:]
BarChart._addBarLabel(self,g,rowNo,colNo,x,y,width,height)
return g
class VerticalBarChart3D(BarChart3D,VerticalBarChart):
_cmpZ=lambda self,a,b:cmp((-a[1],a[3],a[0],-a[4]),(-b[1],b[3],b[0],-b[4]))
class HorizontalBarChart3D(BarChart3D,HorizontalBarChart):
_cmpZ = lambda self,a,b: cmp((-a[1],a[4],a[0],-a[3]),(-b[1],b[4],b[0],-b[3])) #t, z0, z1, x, y = a[:5]
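# Each sample function below returns a Drawing. A minimal way to view one
# (assuming the standard reportlab renderers are installed) is:
#
#   from reportlab.graphics import renderPDF
#   renderPDF.drawToFile(sampleV1(), 'sampleV1.pdf', 'sampleV1')
#
# renderPM.drawToFile(sampleV1(), 'sampleV1.png', 'PNG') gives bitmap output.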
# Vertical samples.
def sampleV0a():
"A slightly pathologic bar chart with only TWO data items."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV0b():
"A pathologic bar chart with only ONE data item."
drawing = Drawing(400, 200)
data = [(42,)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 50
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Jan-99']
drawing.add(bc)
return drawing
def sampleV0c():
"A really pathologic bar chart with NO data items at all!"
drawing = Drawing(400, 200)
data = [()]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.categoryNames = []
drawing.add(bc)
return drawing
def sampleV1():
"Sample of multi-series bar chart."
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 6, 21, 23, 38, 46, 20, 5)
]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
catNames = map(lambda n:n+'-99', catNames)
bc.categoryAxis.categoryNames = catNames
drawing.add(bc)
return drawing
def sampleV2a():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 0
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dy = -60
drawing.add(bc)
return drawing
def sampleV2b():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 5
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dy = -60
drawing.add(bc)
return drawing
def sampleV2c():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.99),
(0.6, -4.9, -3, 4, 9.99)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 2
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n'
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.labels.dy = -60
bc.barLabels.nudge = 10
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
drawing.add(bc)
return drawing
def sampleV3():
"Faked horizontal bar chart using a vertical real one (deprecated)."
names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
"Pacific (ex Japan) Equities", "Emerging Markets Equities",
"UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
assert len(names) == len(series1), "bad data"
assert len(names) == len(series2), "bad data"
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 0
bc.y = 0
bc.height = 100
bc.width = 150
bc.data = (series1,)
bc.bars.fillColor = colors.green
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'w' # irrelevant (becomes 'c')
bc.barLabels.angle = 90
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
bc.barLabels.nudge = 10
bc.valueAxis.visible = 0
bc.valueAxis.valueMin = -2
bc.valueAxis.valueMax = +2
bc.valueAxis.valueStep = 1
bc.categoryAxis.tickUp = 0
bc.categoryAxis.tickDown = 0
bc.categoryAxis.categoryNames = names
bc.categoryAxis.labels.angle = 90
bc.categoryAxis.labels.boxAnchor = 'w'
bc.categoryAxis.labels.dx = 0
bc.categoryAxis.labels.dy = -125
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 6
g = Group(bc)
g.translate(100, 175)
g.rotate(-90)
drawing.add(g)
return drawing
def sampleV4a():
"A bar chart showing value axis region starting at *exactly* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4b():
"A bar chart showing value axis region starting *below* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4c():
"A bar chart showing value axis region staring *above* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4d():
"A bar chart showing value axis region entirely *below* zero."
drawing = Drawing(400, 200)
data = [(-13, -20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -30
bc.valueAxis.valueMax = -10
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
###
##dataSample5 = [(10, 20), (20, 30), (30, 40), (40, 50), (50, 60)]
##dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30), (50, 20)]
dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30)]
def sampleV5a():
"A simple bar chart with no expressed spacing attributes."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5b():
"A simple bar chart with proportional spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 0
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c1():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 0
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c2():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c3():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 0
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c4():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
# Horizontal samples
def sampleH0a():
"Make a slightly pathologic bar chart with only TWO data items."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH0b():
"Make a pathologic bar chart with only ONE data item."
drawing = Drawing(400, 200)
data = [(42,)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 50
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Jan-99']
drawing.add(bc)
return drawing
def sampleH0c():
"Make a really pathologic bar chart with NO data items at all!"
drawing = Drawing(400, 200)
data = [()]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = []
drawing.add(bc)
return drawing
def sampleH1():
"Sample of multi-series bar chart."
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 6, 21, 23, 38, 46, 20, 5)
]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
catNames = map(lambda n:n+'-99', catNames)
bc.categoryAxis.categoryNames = catNames
drawing.add(bc, 'barchart')
return drawing
def sampleH2a():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 0
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.valueAxis.configure(bc.data)
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2b():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 5
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2c():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.99),
(0.6, -4.9, -3, 4, 9.99)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 2
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n'
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
bc.barLabels.nudge = 10
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
drawing.add(bc)
return drawing
def sampleH3():
"A really horizontal bar chart (compared to the equivalent faked one)."
names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
"Pacific (ex Japan) Equities", "Emerging Markets Equities",
"UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
assert len(names) == len(series1), "bad data"
assert len(names) == len(series2), "bad data"
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 100
bc.y = 20
bc.height = 150
bc.width = 250
bc.data = (series1,)
bc.bars.fillColor = colors.green
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'w' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
bc.barLabels.nudge = 10
bc.valueAxis.visible = 0
bc.valueAxis.valueMin = -2
bc.valueAxis.valueMax = +2
bc.valueAxis.valueStep = 1
bc.categoryAxis.tickLeft = 0
bc.categoryAxis.tickRight = 0
bc.categoryAxis.categoryNames = names
bc.categoryAxis.labels.boxAnchor = 'w'
bc.categoryAxis.labels.dx = -170
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 6
g = Group(bc)
drawing.add(g)
return drawing
def sampleH4a():
"A bar chart showing value axis region starting at *exactly* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4b():
"A bar chart showing value axis region starting *below* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4c():
"A bar chart showing value axis region starting *above* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4d():
"A bar chart showing value axis region entirely *below* zero."
drawing = Drawing(400, 200)
data = [(-13, -20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -30
bc.valueAxis.valueMax = -10
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5a():
"A simple bar chart with no expressed spacing attributes."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5b():
"A simple bar chart with proportional spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 0
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c1():
"A simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c2():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c3():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 20
bc.height = 155
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 2
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c4():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleSymbol1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.barSpacing = 3
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
sym1 = ShadedRect()
sym1.fillColorStart = colors.black
sym1.fillColorEnd = colors.blue
sym1.orientation = 'horizontal'
sym1.strokeWidth = 0
sym2 = ShadedRect()
sym2.fillColorStart = colors.black
sym2.fillColorEnd = colors.pink
sym2.orientation = 'horizontal'
sym2.strokeWidth = 0
sym3 = ShadedRect()
sym3.fillColorStart = colors.blue
sym3.fillColorEnd = colors.white
sym3.orientation = 'vertical'
sym3.cylinderMode = 1
sym3.strokeWidth = 0
bc.bars.symbol = sym1
bc.bars[2].symbol = sym2
bc.bars[3].symbol = sym3
drawing.add(bc)
return drawing
def sampleStacked1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.categoryAxis.style = 'stacked'
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.valueAxis.valueMin = 0
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
bc.bars.symbol = ShadedRect()
bc.bars.symbol.fillColorStart = colors.red
bc.bars.symbol.fillColorEnd = colors.white
bc.bars.symbol.orientation = 'vertical'
bc.bars.symbol.cylinderMode = 1
bc.bars.symbol.strokeWidth = 0
bc.bars[1].symbol = ShadedRect()
bc.bars[1].symbol.fillColorStart = colors.magenta
bc.bars[1].symbol.fillColorEnd = colors.white
bc.bars[1].symbol.orientation = 'vertical'
bc.bars[1].symbol.cylinderMode = 1
bc.bars[1].symbol.strokeWidth = 0
bc.bars[2].symbol = ShadedRect()
bc.bars[2].symbol.fillColorStart = colors.green
bc.bars[2].symbol.fillColorEnd = colors.white
bc.bars[2].symbol.orientation = 'vertical'
bc.bars[2].symbol.cylinderMode = 1
bc.bars[2].symbol.strokeWidth = 0
bc.bars[3].symbol = ShadedRect()
bc.bars[3].symbol.fillColorStart = colors.blue
bc.bars[3].symbol.fillColorEnd = colors.white
bc.bars[3].symbol.orientation = 'vertical'
bc.bars[3].symbol.cylinderMode = 1
bc.bars[3].symbol.strokeWidth = 0
drawing.add(bc)
return drawing
#class version of function sampleH5c4 above
class SampleH5c4(Drawing):
"Simple bar chart with absolute spacing."
def __init__(self,width=400,height=200,*args,**kw):
Drawing.__init__(self,width,height,*args,**kw)
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = dataSample5
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
self.add(bc,name='HBC')
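
if __name__=='__main__':
    # Minimal smoke test (not part of the original module): render a couple
    # of the samples above to PDF, assuming reportlab.graphics.renderPDF is
    # importable alongside this module.
    from reportlab.graphics import renderPDF
    renderPDF.drawToFile(sampleV1(), 'sampleV1.pdf', 'sampleV1')
    renderPDF.drawToFile(sampleH1(), 'sampleH1.pdf', 'sampleH1')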
authors: ["[email protected]"]
blob_id: 502af0bed2bbfa2a50d2e1de9e7666692457d803 | directory_id: 4d99350a527a88110b7bdc7d6766fc32cf66f211
path: /OpenGLCffi/EGL/EXT/NOK/swap_region.py | content_id: b2fd2f88da1733562175c045d1e5ec660dfd4f1c
detected_licenses: ["MIT"] | license_type: permissive
repo_name: cydenix/OpenGLCffi | snapshot_id: e790ef67c2f6c9877badd5c38b7d58961c8739cd | revision_id: c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | branch_name: refs/heads/master
visit_date: 2021-01-11T07:31:10.591188 | revision_date: 2017-04-17T11:04:55 | committer_date: 2017-04-17T11:04:55 | github_id: 80312084 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 167 | extension: py
content:
from OpenGLCffi.EGL import params
@params(api='egl', prms=['dpy', 'surface', 'numRects', 'rects'])
def eglSwapBuffersRegionNOK(dpy, surface, numRects, rects):
pass
authors: ["[email protected]"]
blob_id: db00213220f441f8ccbfbb9ed9c1465e73391f49 | directory_id: 3233629117c8672a6ec001c673191a5bfbab609e
path: /hikyuu/admin/dialog/__init__.py | content_id: 47834a88dca817cb29cd6fa6e07ac95d370e3f1c
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
repo_name: haoyong1232002/hikyuu | snapshot_id: efc5ae72b42caa47650c553922af3b0dae20cf78 | revision_id: 9f93dd9374e2b9f3098176ea9c8f18f2cda24185 | branch_name: refs/heads/master
visit_date: 2023-04-23T02:58:19.034029 | revision_date: 2021-05-12T17:52:08 | committer_date: 2021-05-12T17:52:08 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 79 | extension: py
content:
# -*- coding: utf-8 -*-
from .HkuEditSessionDialog import HkuEditSessionDialog
authors: ["[email protected]"]
blob_id: 615957840ee474ac9841f70c54cafc727ca1a5c4 | directory_id: 0e4df81fb59129d23ccc820b11117ba78a372099
path: /inheritance.py | content_id: e263c9de3b6611c8d780899f66d55edec7963e15
detected_licenses: [] | license_type: no_license
repo_name: crishonsou/hackerrank_30_days_of_code | snapshot_id: ffdb51163a4e14d2b8438a8e01183f31b4d9a138 | revision_id: aa267d82915dd7d3cfb6f5cbfb52b86497044b84 | branch_name: refs/heads/main
visit_date: 2022-12-22T18:06:51.595103 | revision_date: 2020-10-06T15:07:15 | committer_date: 2020-10-06T15:07:15 | github_id: 301764193 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1336 | extension: py
content:
## Inheritance
class Person:
def __init__(self, firstName, lastName, idNumber):
self.firstName = firstName
self.lastName = lastName
self.idNumber = idNumber
def printPerson(self):
print('Name:', self.lastName + ',', self.firstName)
print('ID:', self.idNumber)
class Student(Person):
def __init__(self, firstName, lastName, idNumber, scores):
Person.__init__(self, firstName, lastName, idNumber)
self.testScores = scores
def calculate(self):
average = 0
for i in self.testScores:
average += i
average = average / len(self.testScores)
if(average >= 90):
return 'O' # Outstanding
elif(average >= 80):
return 'E' # Exceeds Expectations
elif(average >= 70):
return 'A' # Acceptable
elif(average >= 55):
return 'P' # Poor
elif(average >= 40):
return 'D' # Dreadful
else:
return 'T' # Troll
line = input().split()
firstName = line[0]
lastName = line[1]
idNum = line[2]
numScores = int(input())
scores = list(map(int, input().split()))
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print('Grade:', s.calculate())
authors: ["[email protected]"]