| column | dtype | min | max |
|---|---|---|---|
| blob_id | string | length 40 | length 40 |
| directory_id | string | length 40 | length 40 |
| path | string | length 3 | length 616 |
| content_id | string | length 40 | length 40 |
| detected_licenses | list | 0 items | 112 items |
| license_type | string | 2 classes | |
| repo_name | string | length 5 | length 115 |
| snapshot_id | string | length 40 | length 40 |
| revision_id | string | length 40 | length 40 |
| branch_name | string | 777 classes | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string | 22 classes | |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string | 149 classes | |
| src_encoding | string | 26 classes | |
| language | string | 1 class | |
| is_vendor | bool | 2 classes | |
| is_generated | bool | 2 classes | |
| length_bytes | int64 | 3 | 10.2M |
| extension | string | 188 classes | |
| content | string | length 3 | length 10.2M |
| authors | list | 1 item | 1 item |
| author_id | string | length 1 | length 132 |
e54795bb281bdf8f85f066736ab758402ee247bb | 8d35b8aa63f3cae4e885e3c081f41235d2a8f61f | /discord/ext/dl/extractor/formula1.py | fe89d221c6f687c2412b0273b350ca3685ae8f59 | ["MIT"] | permissive | alexyy802/Texus | 1255f4e54c8d3cc067f0d30daff1cf24932ea0c9 | c282a836f43dfd588d89d5c13f432896aebb540f | refs/heads/master | 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 | MIT | 2021-11-19T09:22:22 | 2021-11-18T10:43:11 | Python | UTF-8 | Python | false | false | 1,020 | py |

# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class Formula1IE(InfoExtractor):
_VALID_URL = (
r"https?://(?:www\.)?formula1\.com/en/latest/video\.[^.]+\.(?P<id>\d+)\.html"
)
_TEST = {
"url": "https://www.formula1.com/en/latest/video.race-highlights-spain-2016.6060988138001.html",
"md5": "be7d3a8c2f804eb2ab2aa5d941c359f8",
"info_dict": {
"id": "6060988138001",
"ext": "mp4",
"title": "Race highlights - Spain 2016",
"timestamp": 1463332814,
"upload_date": "20160515",
"uploader_id": "6057949432001",
},
"add_ie": ["BrightcoveNew"],
}
BRIGHTCOVE_URL_TEMPLATE = "http://players.brightcove.net/6057949432001/S1WMrhjlh_default/index.html?videoId=%s"
def _real_extract(self, url):
bc_id = self._match_id(url)
return self.url_result(
self.BRIGHTCOVE_URL_TEMPLATE % bc_id, "BrightcoveNew", bc_id
        )
 | ["[email protected]"] | |

ad2d33429d0c99627e9c18caa875ca3926d8864f | f028c7ca2e4c42505011ac0543cde4a111ee5c74 | /eggs/django_lfs-0.10.2-py2.7.egg/lfs/order/settings.py | deea3ffc34011276d45e862026eca7c9462fbb11 | [] | no_license | yunmengyanjin/website | d625544330c28f072707dcbbc5eb7308a3f4bd9f | 77e9c70687b35fd8b65a7f2d879e0261ae69c00e | refs/heads/master | 2021-04-22T13:10:09.584559 | 2017-05-15T07:39:32 | 2017-05-15T07:39:32 | 56,428,389 | 2 | 16 | null | 2020-10-02T07:41:08 | 2016-04-17T09:18:33 | Python | UTF-8 | Python | false | false | 755 | py |

# django imports
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
SUBMITTED = 0
PAID = 1
SENT = 2
CLOSED = 3
CANCELED = 4
PAYMENT_FAILED = 5
PAYMENT_FLAGGED = 6
PREPARED = 7
ORDER_STATES = [
(SUBMITTED, _(u"Submitted")),
(PAID, _(u"Paid")),
(PREPARED, _(u"Prepared")),
(SENT, _(u"Sent")),
(CLOSED, _(u"Closed")),
(CANCELED, _(u"Canceled")),
(PAYMENT_FAILED, _(u"Payment Failed")),
(PAYMENT_FLAGGED, _(u"Payment Flagged")),
]
# use numbers above 20 for custom order states to avoid conflicts if new base states are added to LFS core!
LFS_EXTRA_ORDER_STATES = getattr(settings, 'LFS_EXTRA_ORDER_STATES', [])
if LFS_EXTRA_ORDER_STATES:
    ORDER_STATES.extend(LFS_EXTRA_ORDER_STATES)
 | ["[email protected]"] | |
f3bcde6ae30cfb731230794841388499d4d42f42 | 4403600c57fd170aad6bb505e4f14c4b70e63356 | /sensor.py | 3d90ccfcb357c6b8161da75db9832f89fceda02f | [] | no_license | moonclearner/sensor | de4ef554cbc3dadb5fe5e801c55627d9f9340d19 | 0c3ad14375267b135940e8254262b0d054bd472c | refs/heads/master | 2021-01-12T05:11:52.667742 | 2017-01-03T06:38:59 | 2017-01-03T06:38:59 | 77,886,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # sensor collection system
from __future__ import unicode_literals
# starting time: 30, December,2016
# author: moonclearner
# -*- coding: utf-8 -*-
from socket import *
from time import ctime
Host = '127.0.0.1'
Port = 21010
BufferSize = 1024
ADDR = (Host,Port)
def server_init():
tcpserversock = socket(AF_INET,SOCK_STREAM)
tcpserversock.bind(ADDR)
tcpserversock.listen(5)
while True:
print "waiting for connection ..."
        tcpCliSock, addr = tcpserversock.accept()
print '...connected from:',addr
while True:
data = tcpCliSock.recv(BufferSize)
if not data:
break
tcpCliSock.send('[%s] %s' % (ctime(),data))
tcpCliSock.close()
pass
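# Added usage sketch (not part of the original file): a minimal client for
# the echo server above. It reuses ADDR/BufferSize from this module; the
# payload string is an illustrative placeholder.
def example_client():
    clientsock = socket(AF_INET, SOCK_STREAM)
    clientsock.connect(ADDR)
    clientsock.send('temperature:21.5')
    # the server echoes the payload back with a timestamp prefix
    print clientsock.recv(BufferSize)
    clientsock.close()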
server_init()
 | ["[email protected]"] | |

3ea4d53a79484f18a2f537cce2b80ff6fb76d9d5 | 2e9f3f35cd239ce59f528c7b3b5e9714f7e5d5a3 | /furnace/kernels/lib_tree_filter/functions/bfs.py | 0c42bda0aebff2c10db5ec40a4cd5d48df3bdd46 | ["MIT"] | permissive | CV-IP/TreeFilter-Torch | 8e2bd831060d0fa4e589a56353c2d91a7d4ac87b | 46f36024f4522056fb9a3edf90c94f0a86a1352b | refs/heads/master | 2023-02-04T19:09:32.790909 | 2020-12-16T07:14:41 | 2020-12-16T07:14:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py |

import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
import tree_filter_cuda as _C
class _BFS(Function):
@staticmethod
def forward(ctx, edge_index, max_adj_per_vertex):
sorted_index, sorted_parent, sorted_child =\
_C.bfs_forward(edge_index, max_adj_per_vertex)
return sorted_index, sorted_parent, sorted_child
bfs = _BFS.apply
 | ["[email protected]"] | |

cf3574e7f1b07fdaf295f9b85a87e7e6aa4fa6a1 | 34bb6071725fb31f50ef7ff147fce5a06a5bb534 | /code/router/handler.py | 131eb17758636f15b29d37e503a366f93725e8eb | [] | no_license | joshmarshall/intro-to-wsgi-presentation-2013 | 6f28612da4fc7225e8ed2081f725ae940821c0d3 | 19bf30410f435a0bb9a101bb2800cac294096931 | refs/heads/master | 2023-08-14T09:39:08.858311 | 2019-01-09T11:45:25 | 2019-01-09T11:45:25 | 164,854,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py |

class Handler(object):
def __init__(self, environ, start_response):
self._environ = environ
self._start_response = start_response
self._response_started = False
self._code = 200
self._message = "OK"
self.headers = {}
def start_response(self, code, status="OK"):
self.headers.setdefault("Content-Length", "application/json")
self._start_response(
"%s %s" % (code, status), list(self.headers.items()))
| [
"[email protected]"
] | |
3305807a13f174ff87ead377d7acd503806033be | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | 6ac3e38e0fdff20cacdaccca6e8025b52fcd1b8f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,825 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'ActiveDirectoryArgs',
'ExportPolicyRuleArgs',
'VolumePropertiesExportPolicyArgs',
]
@pulumi.input_type
class ActiveDirectoryArgs:
def __init__(__self__, *,
active_directory_id: Optional[pulumi.Input[str]] = None,
dns: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
organizational_unit: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
smb_server_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Active Directory
:param pulumi.Input[str] active_directory_id: Id of the Active Directory
:param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
:param pulumi.Input[str] domain: Name of the Active Directory domain
:param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
:param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
:param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
:param pulumi.Input[str] status: Status of the Active Directory
:param pulumi.Input[str] username: Username of Active Directory domain administrator
"""
if active_directory_id is not None:
pulumi.set(__self__, "active_directory_id", active_directory_id)
if dns is not None:
pulumi.set(__self__, "dns", dns)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if organizational_unit is not None:
pulumi.set(__self__, "organizational_unit", organizational_unit)
if password is not None:
pulumi.set(__self__, "password", password)
if smb_server_name is not None:
pulumi.set(__self__, "smb_server_name", smb_server_name)
if status is not None:
pulumi.set(__self__, "status", status)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="activeDirectoryId")
def active_directory_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Active Directory
"""
return pulumi.get(self, "active_directory_id")
@active_directory_id.setter
def active_directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "active_directory_id", value)
@property
@pulumi.getter
def dns(self) -> Optional[pulumi.Input[str]]:
"""
Comma separated list of DNS server IP addresses for the Active Directory domain
"""
return pulumi.get(self, "dns")
@dns.setter
def dns(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Active Directory domain
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter(name="organizationalUnit")
def organizational_unit(self) -> Optional[pulumi.Input[str]]:
"""
The Organizational Unit (OU) within the Windows Active Directory
"""
return pulumi.get(self, "organizational_unit")
@organizational_unit.setter
def organizational_unit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organizational_unit", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Plain text password of Active Directory domain administrator
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="smbServerName")
def smb_server_name(self) -> Optional[pulumi.Input[str]]:
"""
NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
"""
return pulumi.get(self, "smb_server_name")
@smb_server_name.setter
def smb_server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "smb_server_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Status of the Active Directory
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username of Active Directory domain administrator
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class ExportPolicyRuleArgs:
def __init__(__self__, *,
allowed_clients: Optional[pulumi.Input[str]] = None,
cifs: Optional[pulumi.Input[bool]] = None,
nfsv3: Optional[pulumi.Input[bool]] = None,
nfsv4: Optional[pulumi.Input[bool]] = None,
rule_index: Optional[pulumi.Input[int]] = None,
unix_read_only: Optional[pulumi.Input[bool]] = None,
unix_read_write: Optional[pulumi.Input[bool]] = None):
"""
Volume Export Policy Rule
:param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param pulumi.Input[bool] cifs: Allows CIFS protocol
:param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
:param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
:param pulumi.Input[int] rule_index: Order index
:param pulumi.Input[bool] unix_read_only: Read only access
:param pulumi.Input[bool] unix_read_write: Read and write access
"""
if allowed_clients is not None:
pulumi.set(__self__, "allowed_clients", allowed_clients)
if cifs is not None:
pulumi.set(__self__, "cifs", cifs)
if nfsv3 is not None:
pulumi.set(__self__, "nfsv3", nfsv3)
if nfsv4 is not None:
pulumi.set(__self__, "nfsv4", nfsv4)
if rule_index is not None:
pulumi.set(__self__, "rule_index", rule_index)
if unix_read_only is not None:
pulumi.set(__self__, "unix_read_only", unix_read_only)
if unix_read_write is not None:
pulumi.set(__self__, "unix_read_write", unix_read_write)
@property
@pulumi.getter(name="allowedClients")
def allowed_clients(self) -> Optional[pulumi.Input[str]]:
"""
Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
"""
return pulumi.get(self, "allowed_clients")
@allowed_clients.setter
def allowed_clients(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_clients", value)
@property
@pulumi.getter
def cifs(self) -> Optional[pulumi.Input[bool]]:
"""
Allows CIFS protocol
"""
return pulumi.get(self, "cifs")
@cifs.setter
def cifs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cifs", value)
@property
@pulumi.getter
def nfsv3(self) -> Optional[pulumi.Input[bool]]:
"""
Allows NFSv3 protocol
"""
return pulumi.get(self, "nfsv3")
@nfsv3.setter
def nfsv3(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nfsv3", value)
@property
@pulumi.getter
def nfsv4(self) -> Optional[pulumi.Input[bool]]:
"""
Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
"""
return pulumi.get(self, "nfsv4")
@nfsv4.setter
def nfsv4(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nfsv4", value)
@property
@pulumi.getter(name="ruleIndex")
def rule_index(self) -> Optional[pulumi.Input[int]]:
"""
Order index
"""
return pulumi.get(self, "rule_index")
@rule_index.setter
def rule_index(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "rule_index", value)
@property
@pulumi.getter(name="unixReadOnly")
def unix_read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Read only access
"""
return pulumi.get(self, "unix_read_only")
@unix_read_only.setter
def unix_read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unix_read_only", value)
@property
@pulumi.getter(name="unixReadWrite")
def unix_read_write(self) -> Optional[pulumi.Input[bool]]:
"""
Read and write access
"""
return pulumi.get(self, "unix_read_write")
@unix_read_write.setter
def unix_read_write(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unix_read_write", value)
@pulumi.input_type
class VolumePropertiesExportPolicyArgs:
def __init__(__self__, *,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None):
"""
Set of export policy rules
:param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule
"""
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]:
"""
Export policy rule
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]):
pulumi.set(self, "rules", value)
| [
"[email protected]"
] | |
f595c71d34cfb1606170d2728772f40f56279ff0 | 2309a185c5427f576b7f5bb927a572c778533403 | /smc_updater.py | 5f0996cfab1af45aa08a92a4ed4d4888848be66d | [] | no_license | wxlg1117/smc_updater | 6f4b8a5389dd417a3fce33270f78b036e31e1119 | aa88364a7c000c2dfed80cb70a77751ff4ae7a9f | refs/heads/master | 2020-06-20T11:11:45.107570 | 2019-05-20T21:18:46 | 2019-05-20T21:18:46 | 197,104,198 | 1 | 1 | null | 2019-07-16T02:16:17 | 2019-07-16T02:16:16 | null | UTF-8 | Python | false | false | 9,690 | py | import os
import time
import sys
import re
import json
import cli_ui
import delegator
import logging
import urllib.request
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from pyvirtualdisplay import Display
manual_run = True
sum_bin = "./sum_2.1.0_Linux_x86_64/sum"
ipmicfg_bin = "./IPMICFG_1.29.0_build.181029/Linux/64bit/IPMICFG-Linux.x86_64"
alpha_dict = {
"a":0,
"b":1,
"c":2,
"d":3,
"e":4,
"f":5,
"g":6,
"h":7,
"i":8,
"j":9
}
""" Check if a process ID is valid or not (signal 0 doesn't kill anything)"""
def check_pid(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
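# Quick sanity example (added, not in the original script): the current
# process always exists, so its own PID reports True.
def _check_pid_example() -> bool:
    return check_pid(os.getpid())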
""" Test if the system is using UEFI or BIOS"""
""" Some BIOS updates are seperated by these types"""
def is_uefi_boot():
return os.path.isdir("/sys/firmware/efi")
def get_board_model():
return delegator.chain("sudo dmidecode -t baseboard|grep 'Product Name: ' |sed 's/Product Name: //' | tr -d '[:space:]'").out
def get_current_bios():
return delegator.chain("sudo dmidecode -t bios| grep Version |sed 's/Version://' |tr -d '[:space:]'").out
def get_current_ipmi():
return delegator.chain("sudo ./IPMICFG_1.29.0_build.181029/Linux/64bit/IPMICFG-Linux.x86_64 -ver | awk -F ':' {'print $2'}| tr -d [:space:]").out
def update_ipmi(ipmi_bin):
logging.info("update_ipmi() | Running IPMI Update With {0}".format(ipmi_bin))
print("Updating with {0}".format(ipmi_bin))
ipmi_update = delegator.run("sudo {0} -c UpdateBmc --file {1}".format(sum_bin, ipmi_bin), block=False, timeout=600)
timer = 0
print(ipmi_update.pid)
while check_pid(ipmi_update.pid):
print("Updating IPMI....This may take up to 10 minutes. [ Elapsed Time: {0}m ]".format(str(timer)))
time.sleep(60)
timer += 1
print("IPMI Update Complete")
logging.info(ipmi_update)
logging.info("main::update_ipmi() | IPMI Update Complete {0}".format(ipmi_bin))
def update_bios(bios_bin):
print("Updating BIOS with {0}".format(bios_bin))
logging.info("main::update_bios() | Running BIOS update with {0}".format(bios_bin))
bios_update = delegator.run("sudo {0} -c UpdateBios --file {1}".format(sum_bin, bios_bin), block=False, timeout=600)
timer = 0
while check_pid(bios_update.pid):
print(bios_update.pid)
print("Updating BIOS....This may take up to 10 minutes. [ Elapsed Time: {0}m ]".format(str(timer)))
time.sleep(60)
timer += 1
if 'Manual steps are required' in bios_update.out:
print("Automated BIOS Update Failed: Please Reboot System And Try Again")
logging.error("main::update_bios() | BIOS Update Failed")
logging.error(bios_update)
else:
logging.info("main::update_bios() | BIOS Update Complete {0}".format(bios_bin))
print("BIOS Update Complete. Please reboot to use new BIOS.")
logging.info(bios_update)
logging.info("main::update_bios() | IPMI Update Complete {0}".format(bios_bin))
def download_file(url, dl_path):
print("Downloading {0} to {1}".format(url, dl_path))
urllib.request.urlretrieve(url, dl_path)
def extract_zip(zip_file, extract_dir):
import zipfile
with zipfile.ZipFile(zip_file,"r") as zipped:
zipped.extractall(extract_dir)
def find_bios_file(bios_bin):
if os.path.isdir("/tmp/bios_update/{0}/UEFI".format(bios_bin)) and is_uefi_boot():
return delegator.run("ls /tmp/bios_update/{0}/UEFI/{0}".format(bios_bin)).out
elif os.path.isdir("/tmp/bios_update/BIOS") and not is_uefi_boot():
return delegator.run("ls /tmp/bios_update/{0}/BIOS/{0}".format(bios_bin)).out
else:
return delegator.run("ls /tmp/bios_update/{0}".format(bios_bin)).out
def find_ipmi_file(ipmi_bin):
return delegator.run("ls /tmp/ipmi_update/*.bin").out
def is_alpha_version(ver):
try:
return ver.encode('ascii').isalpha()
except:
return False
def eval_version(cur_version, new_version, ipmi=False):
version = re.findall(r"[^\W\d_]+|\d+", cur_version)
cur_major = int(version[0])
cur_minor = int(version[1])
cur_ver = float(str(cur_major) + "." + str(cur_minor))
if ipmi == False and is_alpha_version(cur_version):
cur_alpha = cur_version[2]
version_new = re.findall(r"[^\W\d_]+|\d+", new_version)
if ipmi == False and is_alpha_version(new_version):
new_alpha = new_version[2]
new_major = int(version_new[0])
new_minor = int(version_new[1])
new_ver = float(str(new_major) + "." + str(new_minor))
if new_ver > cur_ver:
return True
if new_ver == cur_ver:
if is_alpha_version(new_version):
            if is_alpha_version(cur_version):
if alpha_dict[new_alpha] > alpha_dict[cur_alpha]:
return True
else:
return False
else:
return True #Alpha versions are higher than a non-alpha version (3.1a > 3.1)
if new_ver < cur_ver:
return False
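# Worked examples (added for clarity, not in the original script):
# eval_version compares versions numerically as major.minor floats.
def _eval_version_examples() -> None:
    assert eval_version("3.1", "3.2") is True    # newer minor accepted
    assert eval_version("3.2", "3.1") is False   # downgrade rejected
    assert eval_version("1.29", "1.30", ipmi=True) is True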
def get_latest_bios(board_model):
update_choice = None
latest_bios_revision = None
for link in links:
link_board = link.split("/")[-1].split(".")[0]
if board_model.replace("+", "_") == link_board:
driver.get("https://www.supermicro.com{0}".format(link))
driver.find_element_by_xpath('//a[@href="{0}"]'.format("javascript:document.biosForm.submit();")).click()
raw = driver.find_element_by_class_name("yui-skin-sam").text.split("\n")
for line in raw:
if "BIOS Revision:" in line:
latest_bios_version = line.split(":")[1].replace("R", "").strip()
a = driver.find_element_by_partial_link_text('.zip')
filename = a.text
software_id = a.get_attribute("href").split("=")[-1]
bios_dl_link = "https://www.supermicro.com/Bios/softfiles/{0}/{1}".format(software_id, filename)
if latest_bios_version and bios_dl_link:
return [latest_bios_version, bios_dl_link]
else:
print("failed to download bios information")
if latest_bios_revision == None:
print("Failed to find BIOS online")
def get_latest_ipmi(board_model):
for link in links:
link_board = link.split("/")[-1].split(".")[0]
if board_model.replace("+", "_") == link_board:
driver.get("https://www.supermicro.com{0}".format(link))
driver.find_element_by_xpath('//a[@href="{0}"]'.format("javascript:document.IPMIForm.submit();")).click()
raw = driver.find_element_by_class_name("yui-skin-sam").text.split("\n")
for line in raw:
if "Firmware Revision:" in line:
latest_ipmi_version = line.split(":")[1].strip(" R")
a = driver.find_element_by_partial_link_text('.zip')
filename = a.text
software_id = a.get_attribute("href").split("=")[-1]
ipmi_dl_link = "https://www.supermicro.com/Bios/softfiles/{0}/{1}".format(software_id, filename)
return [latest_ipmi_version, ipmi_dl_link]
def main():
board_model = get_board_model()
bios_version = get_current_bios()
bios_dl = get_latest_bios(board_model)
ipmi_version = get_current_ipmi()
ipmi_dl = get_latest_ipmi(board_model)
sys_headers = ['FW', 'CURRENT', 'LATEST']
cli_ui.info_section(cli_ui.green, cli_ui.bold, "SMC UPDATER")
board_header = ['BOARD MODEL']
board_data = [[(cli_ui.bold, board_model)]]
cli_ui.info_table(board_data, headers=board_header)
print()
sys_data = [
[(cli_ui.bold, "BIOS"), (cli_ui.bold, bios_version), (cli_ui.bold, bios_dl[0])],
[(cli_ui.bold, "IPMI"), (cli_ui.bold, ipmi_version), (cli_ui.bold, ipmi_dl[0])]
]
cli_ui.info_table(sys_data, headers=sys_headers)
print()
if eval_version(bios_version, bios_dl[0]):
update_choice = None
while update_choice == None or update_choice == 'y':
bios_old = True
if manual_run == True:
update_choice = cli_ui.ask_string("BIOS is out of date. Would you like to update now? [y/n]")
if update_choice != 'y':
continue
bin_file = bios_dl[1].split("/")[-1]
bin_name = bin_file.split("_")[0]
bin_ver = bin_file.split(".")[0].split("_")[-1]
bin_raw = bin_file.split(".")[0]
bin_ex = "{0}.{1}".format(bin_name, bin_ver)
download_file(bios_dl[1], '/tmp/{0}'.format(bios_dl[1].split("/")[-1]))
extract_zip("/tmp/{0}".format(bin_file), "/tmp/bios_update/")
bios_file_path = find_bios_file(bin_ex)
update_bios(bios_file_path)
break
else:
print("BIOS is up-to-date.")
# logging.info("main(): Website version is newer, updating BIOS...")
if eval_version(ipmi_version, ipmi_dl[0], ipmi=True):
update_choice = None
while update_choice == None or update_choice == 'y':
ipmi_old = True
if manual_run == True:
update_choice = cli_ui.ask_string("IPMI is out of date. Would you like to update now? [y/n]")
logging.info("main(): Webiste version is newer, updating IPMI...")
bin_file = ipmi_dl[1].split("/")[-1]
bin_name = bin_file.split(".")[0]
bin_ex = "{0}.{1}".format(bin_name, 'bin')
download_file(ipmi_dl[1], '/tmp/{0}'.format(ipmi_dl[1].split("/")[-1]))
extract_zip("/tmp/{0}".format(bin_file), "/tmp/ipmi_update/")
ipmi_file_path = "/tmp/ipmi_update/{0}.bin".format(bin_name)
update_ipmi(ipmi_file_path)
break
else:
print("IPMI is up-to-date.")
print("\nExiting...")
if __name__ == "__main__":
if not delegator.run("which dmidecode").out:
print("Fatal Error: dmidecode not detected.")
exit()
#logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
#binary = FirefoxBinary('./geckodriver')
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
#File created with "for i in `curl -s https://www.supermicro.com/products/motherboard/| grep quaternaryNavItem|awk -F ' ' {'print $2'}| sed 's/href=\"//'|sed 's/\"//'|grep -v 'Global_SKU'`; do curl -s https://www.supermicro.com/${i} | grep prodLink| awk -F '<a href="' {'print $2'}| awk -F 'class=' {'print $1'}|sed 's/\"//'|grep -v Global_SKU >> smc_board_links.txt;done"
with open("smc_board_links.txt") as f:
links = f.readlines()
    main()
 | ["[email protected]"] | |

08c134f6f876b56b29c1de913786e6806a67d98e | 74b12c96a73d464e3ca3241ae83a0b6fe984b913 | /python/tvm/runtime/__init__.py | e0da680a24fc3e555e5824caa81dc414f22c6abf | ["Apache-2.0", "BSD-3-Clause", "Zlib", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "BSD-2-Clause"] | permissive | masahi/tvm | cf765bb892655f02135e1ce3afde88698f026483 | c400f7e871214451b75f20f4879992becfe5e3a4 | refs/heads/master | 2023-08-22T20:46:25.795382 | 2022-04-13T08:47:10 | 2022-04-13T08:47:10 | 138,661,036 | 4 | 2 | Apache-2.0 | 2021-09-03T20:35:19 | 2018-06-25T23:39:51 | Python | UTF-8 | Python | false | false | 1,454 | py |

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM runtime namespace."""
# class exposures
from .packed_func import PackedFunc
from .object import Object
from .object_generic import ObjectGeneric, ObjectTypes
from .ndarray import NDArray, DataType, DataTypeCode, Device
from .module import Module, num_threads
from .profiling import Report
# function exposures
from .object_generic import convert_to_object, convert, const
from .ndarray import device, cpu, cuda, gpu, opencl, cl, vulkan, metal, mtl
from .ndarray import vpi, rocm, ext_dev
from .module import load_module, enabled, system_lib
from .container import String, ShapeTuple
from .params import save_param_dict, load_param_dict
from . import executor
 | ["[email protected]"] | |

839c9387ac151c746e63cdb75c3c0509c99be87d | f9e1d9c71d232aa0bcf03627259e6c9f88538b18 | /gs92QuerysetAggregation/gs92/asgi.py | a4d73d71bdf12411bd1bef9349c4ca3d3cea7831 | [] | no_license | nayan-gujju/Django-Practice | a7db202b6a3627a6a4e9f96953b61e43eaf68cb1 | eafa29e9321a1683867b2ea1d26ca74dfa6db12d | refs/heads/master | 2023-07-27T11:41:43.956705 | 2021-09-09T08:47:44 | 2021-09-09T08:47:44 | 403,917,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |

"""
ASGI config for gs92 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs92.settings')
application = get_asgi_application()
 | ["[email protected]"] | |

9f36779e687ed1474e2e97a9940161e6764000b2 | b0c8e0cafa4a8916faab3cce65756ae91426c43f | /study/Python/Week8/BOJ_11497_강의현.py | e96c5e3d830e8bd8766220fac444dcc77cd359af | [] | no_license | Rurril/IT-DA-3rd | b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4 | 9985e237cb1b90e9609656d534e0ed164723e281 | refs/heads/master | 2022-07-22T15:26:39.085369 | 2021-11-23T13:30:06 | 2021-11-23T13:30:06 | 288,980,334 | 3 | 29 | null | 2020-11-05T10:25:30 | 2020-08-20T10:49:17 | Java | UTF-8 | Python | false | false | 598 | py |

# Jumping over logs (통나무 건너뛰기) - S2
import sys
from collections import deque
T=int(sys.stdin.readline())
for _ in range(T):
new_log=deque()
N=int(sys.stdin.readline())
log=list(map(int,sys.stdin.readline().split()))
log.sort()
new_log.append(log[-1])
for i in range(N-2,-1,-1):
if i%2==0:
new_log.appendleft(log[i])
else:
new_log.append(log[i])
difficulty=list()
difficulty.append(abs(new_log[-1]-new_log[0]))
for i in range(1,N):
difficulty.append(abs(new_log[i]-new_log[i-1]))
    print(max(difficulty))
 | ["[email protected]"] | |
bf3ab03aff33092943b6feb95eb8ecc781d53477 | 91d9bba0d7608818c077676c588701855dd1382c | /virtual/bin/pyrsa-encrypt | 3bf93434aec95d8f816bcb119259cc94286688e0 | [] | no_license | Antony-me/movie-library | 89e82c908288d55153aa81f2289087246e383cf4 | cbd333a79362ab1a2aa26356a6e914e9b67f5a63 | refs/heads/main | 2023-01-11T10:29:56.626291 | 2020-11-19T12:17:29 | 2020-11-19T12:17:29 | 313,741,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/home/moringa/Documents/Moringa-Projects/CORE-PYTHON/Django/Netflix/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(encrypt())
| [
"[email protected]"
] | ||
0485c06a92bd564030cc3dff86d3ed9c9bb8fbd3 | ec931947aa3e06ce565637e7ee1cb707f56375a2 | /aoc2015/modules/grid.py | 145b44dc85960aecdd0fa419d8dbac10c91fa804 | [] | no_license | chrisglencross/advent-of-code | 5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0 | 21623d4aa01a9e20285a0233c50f8f56c4099af5 | refs/heads/master | 2023-01-24T22:01:30.829679 | 2023-01-12T23:03:03 | 2023-01-12T23:03:03 | 224,833,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,596 | py | from __future__ import annotations
from typing import Tuple, Dict, List, Optional
import networkx as nx
from aoc2019.modules import textgridprinter
from aoc2019.modules.directions import COMPASS_DIRECTIONS
Coords = Tuple[int, int]
def default_is_navigable(grid: Grid, from_coord: Coords, to_coord: Coords):
return grid[from_coord] in {"."} and grid[to_coord] in {"."}
def default_node_factory(coords):
return coords
class Grid:
def __init__(self, grid, directions=COMPASS_DIRECTIONS.values()):
self.grid = grid
self.directions = directions
def items(self):
return self.grid.items()
def get_bounds(self) -> Tuple[Coords, Coords]:
xs = set([c[0] for c in self.grid.keys()])
ys = set([c[1] for c in self.grid.keys()])
if not xs:
xs = {0}
if not ys:
ys = {0}
return (min(xs), min(ys)), (max(xs), max(ys))
def find_cell(self, symbol) -> Optional[Coords]:
for coords, cell in self.grid.items():
if cell == symbol:
return coords
return None
def find_cells(self, symbol) -> List[Coords]:
result = []
for coords, cell in self.grid.items():
if cell == symbol:
result.append(coords)
return result
def index_cells(self, symbols=None, not_symbols=None) -> Dict[str, Coords]:
if symbols is None and not_symbols is None:
not_symbols = {".", "#", " "}
result = {}
for coords, cell in self.grid.items():
if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
if result.get(cell) is not None:
raise Exception(f"Symbol {cell} is repeated in grid. Index it with index_repeating_cells()")
result[cell] = coords
return result
def index_repeating_cells(self, symbols=None, not_symbols=None) -> Dict[str, List[Coords]]:
if symbols is None and not_symbols is None:
not_symbols = {".", "#", " "}
result = {}
for coords, cell in self.grid.items():
if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
result_list = result.get(cell)
if result_list is None:
result_list = []
result[cell] = result_list
result_list.append(coords)
return result
def keys(self):
return self.grid.keys()
def values(self):
return self.grid.values()
def get(self, coords: Coords, default_value=None):
return self.grid.get(coords, default_value)
def __getitem__(self, coords: Coords):
return self.get(coords)
def __setitem__(self, coords: Coords, cell: str):
self.grid[coords] = cell
def build_graph(self,
directions=COMPASS_DIRECTIONS.values(),
node_factory=default_node_factory,
is_navigable=default_is_navigable) -> nx.Graph:
graph = nx.Graph()
self.add_graph_edges(graph, directions, node_factory, is_navigable)
return graph
def build_digraph(self,
directions=COMPASS_DIRECTIONS.values(),
node_factory=default_node_factory,
is_navigable=default_is_navigable) -> nx.DiGraph:
graph = nx.DiGraph()
self.add_graph_edges(graph, directions, node_factory, is_navigable)
return graph
def add_graph_edges(self, graph: nx.Graph,
directions=COMPASS_DIRECTIONS.values(),
node_factory=default_node_factory,
is_navigable=default_is_navigable):
for from_coords, from_symbol in self.items():
from_node = node_factory(from_coords)
for direction in directions:
to_coords = direction.move(from_coords)
to_symbol = self.get(to_coords)
if to_symbol and is_navigable(self, from_coords, to_coords):
to_node = node_factory(to_coords)
graph.add_edge(from_node, to_node, distance=1)
def print(self):
textgridprinter.TextGridPrinter().print(self)
def parse_grid(content: str) -> Grid:
grid = {}
for y, line in enumerate(content.split("\n")):
for x, cell in enumerate(line.rstrip()):
grid[(x, y)] = cell
return Grid(grid)
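# Hedged usage sketch (not part of the original module): build a graph from
# a tiny inline maze and query a shortest path with networkx. The 3x3
# layout is an illustrative placeholder; "#" cells are walls.
def _example_shortest_path() -> int:
    maze = parse_grid(".#.\n...\n.#.")
    graph = maze.build_graph()
    # (0, 0) -> (0, 1) -> (1, 1) -> (2, 1) -> (2, 0): length 4
    return nx.shortest_path_length(graph, (0, 0), (2, 0))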
def load_grid(file: str) -> Grid:
with open(file) as f:
content = f.read()
    return parse_grid(content)
 | ["[email protected]"] | |

4c651e654d7a4629ae37b0c69f86348993078c0b | 8d753bb8f19b5b1f526b0688d3cb199b396ed843 | /osp_sai_2.1.8/system/third_party/precompiled/arm64/python/usr/bin/smtpd.py | 88dc01c9dee2f9d29f28fcd5b376d6d926094198 | [] | no_license | bonald/vim_cfg | f166e5ff650db9fa40b564d05dc5103552184db8 | 2fee6115caec25fd040188dda0cb922bfca1a55f | refs/heads/master | 2023-01-23T05:33:00.416311 | 2020-11-19T02:09:18 | 2020-11-19T02:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,654 | py |

#!/data01/users/sw/shil/my_tmp/osp_sai/arm64/Python-2.7.13_dir/python2_7_13_for_aarch64/../python2_7_13_for_aarch64_out/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <[email protected]>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
# args is ignored
self.push('221 Bye')
self.close_when_done()
# factored
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <[email protected]> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcpttos:
self.push('503 Error: need RCPT command')
return
if arg:
self.push('501 Syntax: DATA')
return
self.__state = self.DATA
self.set_terminator('\r\n.\r\n')
self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
def __init__(self, localaddr, remoteaddr):
self._localaddr = localaddr
self._remoteaddr = remoteaddr
asyncore.dispatcher.__init__(self)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(localaddr)
self.listen(5)
except:
# cleanup asyncore.socket_map before raising
self.close()
raise
else:
print >> DEBUGSTREAM, \
'%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
self.__class__.__name__, time.ctime(time.time()),
localaddr, remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
channel = SMTPChannel(self, conn, addr)
# API for "doing something useful with the message"
def process_message(self, peer, mailfrom, rcpttos, data):
"""Override this abstract method to handle messages from the client.
peer is a tuple containing (ipaddr, port) of the client that made the
socket connection to our smtp port.
mailfrom is the raw address the client claims the message is coming
from.
rcpttos is a list of raw addresses the client wishes to deliver the
message to.
data is a string containing the entire full text of the message,
headers (if supplied) and all. It has been `de-transparencied'
according to RFC 821, Section 4.5.2. In other words, a line
containing a `.' followed by other text has had the leading dot
removed.
This function should return None, for a normal `250 Ok' response;
otherwise it returns the desired response string in RFC 821 format.
"""
raise NotImplementedError
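# Hedged subclass sketch (not part of the original module): the smallest
# useful process_message override, which logs the envelope and accepts the
# message by returning None.
class _EnvelopeLoggingServer(SMTPServer):
    def process_message(self, peer, mailfrom, rcpttos, data):
        print 'peer=%s from=%s to=%s bytes=%d' % (
            peer, mailfrom, COMMASPACE.join(rcpttos), len(data))
        return None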
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
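# Example invocation (added, not in the original): wiring DebuggingServer to
# the documented defaults (localhost:8025 proxying toward localhost:25) and
# entering the asyncore event loop.
def _run_debugging_server():
    # the dispatcher registers itself with asyncore on construction
    server = DebuggingServer(('localhost', 8025), ('localhost', 25))
    asyncore.loop()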
class PureProxy(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
lines = data.split('\n')
# Look for the last header
i = 0
for line in lines:
if not line:
break
i += 1
lines.insert(i, 'X-Peer: %s' % peer[0])
data = NEWLINE.join(lines)
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got some refusals:', refused
def _deliver(self, mailfrom, rcpttos, data):
import smtplib
refused = {}
try:
s = smtplib.SMTP()
s.connect(self._remoteaddr[0], self._remoteaddr[1])
try:
refused = s.sendmail(mailfrom, rcpttos, data)
finally:
s.quit()
except smtplib.SMTPRecipientsRefused, e:
print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
refused = e.recipients
except (socket.error, smtplib.SMTPException), e:
print >> DEBUGSTREAM, 'got', e.__class__
# All recipients were refused. If the exception had an associated
# error code, use it. Otherwise,fake it with a non-triggering
# exception code.
errcode = getattr(e, 'smtp_code', -1)
errmsg = getattr(e, 'smtp_error', 'ignore')
for r in rcpttos:
refused[r] = (errcode, errmsg)
return refused
class MailmanProxy(PureProxy):
def process_message(self, peer, mailfrom, rcpttos, data):
from cStringIO import StringIO
from Mailman import Utils
from Mailman import Message
from Mailman import MailList
# If the message is to a Mailman mailing list, then we'll invoke the
# Mailman script directly, without going through the real smtpd.
# Otherwise we'll forward it to the local proxy for disposition.
listnames = []
for rcpt in rcpttos:
local = rcpt.lower().split('@')[0]
# We allow the following variations on the theme
# listname
# listname-admin
# listname-owner
# listname-request
# listname-join
# listname-leave
parts = local.split('-')
if len(parts) > 2:
continue
listname = parts[0]
if len(parts) == 2:
command = parts[1]
else:
command = ''
if not Utils.list_exists(listname) or command not in (
'', 'admin', 'owner', 'request', 'join', 'leave'):
continue
listnames.append((rcpt, listname, command))
# Remove all list recipients from rcpttos and forward what we're not
# going to take care of ourselves. Linear removal should be fine
# since we don't expect a large number of recipients.
for rcpt, listname, command in listnames:
rcpttos.remove(rcpt)
# If there's any non-list destined recipients left,
print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
if rcpttos:
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got refusals:', refused
# Now deliver directly to the list commands
mlists = {}
s = StringIO(data)
msg = Message.Message(s)
# These headers are required for the proper execution of Mailman. All
# MTAs in existence seem to add these if the original message doesn't
# have them.
if not msg.getheader('from'):
msg['From'] = mailfrom
if not msg.getheader('date'):
msg['Date'] = time.ctime(time.time())
for rcpt, listname, command in listnames:
print >> DEBUGSTREAM, 'sending message to', rcpt
mlist = mlists.get(listname)
if not mlist:
mlist = MailList.MailList(listname, lock=0)
mlists[listname] = mlist
# dispatch on the type of command
if command == '':
# post
msg.Enqueue(mlist, tolist=1)
elif command == 'admin':
msg.Enqueue(mlist, toadmin=1)
elif command == 'owner':
msg.Enqueue(mlist, toowner=1)
elif command == 'request':
msg.Enqueue(mlist, torequest=1)
elif command in ('join', 'leave'):
# TBD: this is a hack!
if command == 'join':
msg['Subject'] = 'subscribe'
else:
msg['Subject'] = 'unsubscribe'
msg.Enqueue(mlist, torequest=1)
class Options:
setuid = 1
classname = 'PureProxy'
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except ValueError:
usage(1, 'Bad remote port: %s' % remotespec)
return options
if __name__ == '__main__':
options = parseargs()
# Become nobody
classname = options.classname
if "." in classname:
lastdot = classname.rfind(".")
mod = __import__(classname[:lastdot], globals(), locals(), [""])
classname = classname[lastdot+1:]
else:
import __main__ as mod
class_ = getattr(mod, classname)
proxy = class_((options.localhost, options.localport),
(options.remotehost, options.remoteport))
if options.setuid:
try:
import pwd
except ImportError:
print >> sys.stderr, \
'Cannot import module "pwd"; try running with -n option.'
sys.exit(1)
nobody = pwd.getpwnam('nobody')[2]
try:
os.setuid(nobody)
except OSError, e:
if e.errno != errno.EPERM: raise
print >> sys.stderr, \
'Cannot setuid "nobody"; try running with -n option.'
sys.exit(1)
try:
asyncore.loop()
except KeyboardInterrupt:
        pass
 | ["[email protected]"] | |

590dfe20b180eb3890d52d15e988acae284a291f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-7295.py | b08c00077bf2436014166c990deddcaa8e461e1f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py |

# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
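# Usage sketch (comment only, added for clarity; ChocoPy requires all
# declarations before top-level statements, so this is not live code):
#   v:Vector = None
#   v = Vector()
#   v.append(7)
#   v.get(0)     # 7
#   v.length()   # 1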
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
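# Growth sketch (comments only, not executed): starting from capacity 1,
# successive increase_capacity() calls double the storage -- 1, 2, 4, 8,
# ... -- until capacity exceeds doubling_limit // 2 (here 500), after
# which each call falls back to adding a single slot, e.g.
#   dv:DoublingVector = None
#   dv = DoublingVector()
#   dv.increase_capacity()  # capacity 1 -> 2
#   dv.increase_capacity()  # capacity 2 -> 4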
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
            self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
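# For example, vrange(2, 6) returns a DoublingVector holding [2, 3, 4, 5];
# the upper bound j is exclusive.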
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
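# Worked example: with v = vrange(2, 10) the contents [2, 3, 4, 5, 6, 7, 8, 9]
# shrink to [2, 3, 5, 7] -- each survivor k removes the later multiples of k,
# so this is really trial division rather than a true sieve.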
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
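# With n = 50 the loop prints the primes below 50:
# 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47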
| [
"[email protected]"
] | |
24f099a2224e7baa91a9ab2ebaa2b26ed626e085 | b86a0656dfb861e0af4b784c94f52742738c29ae | /abf-repoclosure/repoclosure/renders/render_repoclosure.py | a50e71f3bd0e2a7cd0cf7642ac73fd62d6f25f28 | [] | no_license | OpenMandrivaSoftware/docker-abf | dba52547ac51fa86028a4ee56f9b165297e66fd5 | 635774f0f97e71aeaa8f9a3965bfa94c99ad1d93 | refs/heads/master | 2023-06-21T14:59:42.311892 | 2023-06-20T01:04:10 | 2023-06-20T01:04:10 | 45,573,628 | 0 | 6 | null | 2018-03-19T21:56:09 | 2015-11-04T23:12:59 | Shell | UTF-8 | Python | false | false | 560 | py | from ..templates import repoclosure
def render_repoclosure(result, title, compressed_report, path):
t = repoclosure()
if result['return_code'] == -1:
t.code = -1
t.title = title
t.errors = result['errors_raw']
else:
t.bad_packages = result['report']
t.code = result['return_code']
t.errors = result['errors_raw']
t.count = result['count']
t.title = title
t.total_count = result['total_count']
t.percent = result['percent']
t.compressed = compressed_report
with open(path, "w") as f:
f.write(t.respond()) | [
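
# Minimal usage sketch (argument values are illustrative, not part of this
# module); `result` must carry the keys read above: return_code, errors_raw,
# report, count, total_count and percent.
#
#   render_repoclosure(result, 'main x86_64', compressed_blob,
#                      '/tmp/repoclosure.html')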
"[email protected]"
] | |
2ba8d0457b0f510232d4d95043d81f82a5fa7b41 | b8fed8222b41e447cd5ce83513eb4d014c01742b | /sbm_purchase_rent/contract/po_contract.py | bc25bb0bcb7486749663e8676b19e87b615275c2 | [] | no_license | lajayuhniyarsyah/ERP-Supra | e993d8face6e022b6f863d1dff7cb51cda36be8d | 5a64dbb57ee40070354926700091fb9025c1350c | refs/heads/master | 2021-01-25T22:09:46.306990 | 2017-11-08T05:32:04 | 2017-11-08T05:32:04 | 23,605,825 | 0 | 10 | null | 2017-11-08T05:32:05 | 2014-09-03T03:58:28 | Python | UTF-8 | Python | false | false | 1,186 | py | from datetime import datetime
import netsvc
from osv import osv, fields
class POContract(osv.osv):
_inherit = 'purchase.order'
_columns = {
'contract_id' : fields.many2one('purchase.order.contract.data','Contract',ondelete="Cascade"),
'contract_no' : fields.related('contract_id','contract_no',type="char",string="Contract No",store=False),
'start_contract' : fields.related('contract_id','start_contract',type="date",string="Contract Start",store=False),
'expire_contract' : fields.related('contract_id','expire_contract',type="date",string="Contract Expire",store=False),
'notes' : fields.related('contract_id','notes',type="text",string="Notes",store=False),
}
# def write(self,cr,uid,ids,vals,context=None):
# return False
POContract()
class POContractData(osv.osv):
_name = 'purchase.order.contract.data'
_rec_name = 'contract_no'
_columns = {
'contract_no' : fields.char('Contract No',30,required=True),
'start_contract' : fields.date('Date Start',required=True),
'expire_contract' : fields.date('Expire Contract',required=True),
'notes' : fields.text('Notes'),
'pos' : fields.one2many('purchase.order','contract_id','POs')
}
POContractData() | [
"[email protected]"
] | |
6d4ccc01a0dc81b33d21a9f1ecd1714a78978b4a | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/markov_chain_20200121115909.py | bdd2412da83d245c8cc5b9d7f73b3d70654d3c82 | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 2,340 | py | import sample
import random
from clean_text import clean
from dictogram import Dictogram
class Markov():
def __init__(self, corpus):
self.corpus = clean(corpus)
self.states = {}
self.chain()
def chain(self):
last_word = None
for word in self.corpus:
if last_word is not None: # set last word line 14
if last_word not in self.states: # if we haven't seen this word before
self.states[last_word] = Dictogram() # empty histogram as value
self.states[last_word].add_count(word) # add word to last word histogram
last_word = word # set word as last_word
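    # Worked example (assuming clean() yields the word list): for the corpus
    # 'one fish two fish red fish blue fish', self.states becomes
    #   {'one': {'fish': 1}, 'fish': {'two': 1, 'red': 1, 'blue': 1},
    #    'two': {'fish': 1}, 'red': {'fish': 1}, 'blue': {'fish': 1}}
    # with each value a Dictogram of follower counts.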
def __str__(self):
return str(self.states)
def random_walk(self, num_words=11):
# length = len(self.states)
# rand = random.randint(0, length)
sentence = []
length = len(self.states)
        rand = random.randint(0, length - 1)
words_counter = 0
last_word = None
while num_words > words_counter:
if last_word:
pickings = self.states[last_word] # dictionary of words to pick from based on last_word's hist
print(pickings)
total_wc = 0 # number of words in dictionary for a word
print(total_wc)
dart = random.randint(0, 100) # as percentage
print(dart)
for value in pickings.values(): # calculates total word count
total_wc += value
print(value)
counter = 0
for key,value in pickings.items():
print(key, value)
while counter < dart:
counter += (value / total_wc) * 100 # as percentage
print(counter)
last_word = key
print(last_word)
else:
last_word = (list(self.states)[rand])
words_counter += 1
sentence.append(last_word)
return sentence
if __name__ == '__main__':
source = 'one fish two fish red fish blue fish'
markov = Markov('source.txt')
print(markov.states)
print('')
print(markov.random_walk())
| [
"[email protected]"
] | |
9ea95b3cbe04b5c4c0ea9517f7307f3eab838f14 | 349dadbf45b7c12a3fe41c5e0421c0488b679919 | /transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py | 67c065e7ecf815ac334c6ebdf52ce62839538df3 | [
"BSD-3-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0"
] | permissive | salesforce/CodeRL | c772e408bac690527759f416ea22add4c97e5bec | 51db4ff983d5376e62b9e7eba150316a651c80d9 | refs/heads/main | 2023-08-18T18:38:02.740995 | 2022-11-18T16:14:28 | 2022-11-18T16:14:28 | 508,912,853 | 412 | 52 | BSD-3-Clause | 2023-08-31T07:51:27 | 2022-06-30T02:54:36 | Python | UTF-8 | Python | false | false | 9,820 | py | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model PEGASUS."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://arxiv.org/pdf/1912.08777.pdf).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
pad_token="<pad>",
eos_token="</s>",
unk_token="<unk>",
mask_token="<mask_2>",
mask_token_sent="<mask_1>",
additional_special_tokens=None,
offset=103, # entries 2 - 104 are only used for pretraining
**kwargs
):
self.offset = offset
if additional_special_tokens is not None:
assert isinstance(
additional_special_tokens, list
), f"additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}"
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
]
if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
raise ValueError(
f"Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
)
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
pad_token=pad_token,
eos_token=eos_token,
unk_token=unk_token,
mask_token=mask_token,
mask_token_sent=mask_token_sent,
offset=offset,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = bool(self.vocab_file)
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
assert all_special_ids == set(
range(len(self.additional_special_tokens) + 3)
), f"There should be 3 special tokens: mask_token, pad_token, and eos_token + {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
"""
Build model inputs from a sequence by adding eos to the end. no bos token is added to the front.
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
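
    # Example (token values are illustrative): if eos_token_id were 1, then
    # build_inputs_with_special_tokens([5, 8]) -> [5, 8, 1], and the pair
    # form ([5], [8]) -> [5, 8, 1].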
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| [
"[email protected]"
] | |
3af099fce18a35cd4837291a2a99727140954c97 | 74f8d8c8030ce0c8cd3622cb99f0a668ba93a7e8 | /dialogue-engine/test/programytest/parser/template/node_tests/test_authorise.py | d421ef78f11762c08248c75b2c9b95023a672603 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Tommytosik/cotoba-agent-oss | 3124a376ac0ca1147a58405a8f269a0eb68bc014 | 78e5c2c244b92e71755783d972228904c4d0d373 | refs/heads/master | 2022-12-08T15:44:27.731731 | 2020-08-04T08:36:41 | 2020-08-04T08:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.authorise import TemplateAuthoriseNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateAuthoriseNode(TemplateAuthoriseNode):
def __init__(self):
TemplateAuthoriseNode.__init__(self)
def resolve_to_string(self, context):
raise Exception("This is a failure")
class TemplateAuthoriseNodeTests(ParserTestsBaseClass):
def test_node_init(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateAuthoriseNode()
node.role = "root"
self.assertIsNotNone(node)
self.assertEqual("root", node.role)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual("[AUTHORISE (role=root)]", node.to_string())
def test_node_init_optiona_srai(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateAuthoriseNode()
node.role = "root"
self.assertIsNotNone(node)
self.assertEqual("root", node.role)
node.denied_srai = "ACCESS_DENIED"
self.assertIsNotNone(node)
self.assertEqual("ACCESS_DENIED", node.denied_srai)
root.append(node)
self.assertEqual(len(root.children), 1)
self.assertEqual("[AUTHORISE (role=root, denied_srai=ACCESS_DENIED)]", node.to_string())
def test_to_xml_service_no_content(self):
root = TemplateNode()
node = TemplateAuthoriseNode()
node.role = "root"
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><authorise role="root" /></template>', xml_str)
def test_to_xml_service_with_content(self):
root = TemplateNode()
node = TemplateAuthoriseNode()
node.role = "root"
self.assertIsNotNone(node)
node.append(TemplateWordNode("Hello"))
root.append(node)
self.assertEqual(len(root.children), 1)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><authorise role="root">Hello</authorise></template>', xml_str)
def test_to_xml_service_no_content_and_optional_srai(self):
root = TemplateNode()
node = TemplateAuthoriseNode()
node.role = "root"
node.denied_srai = "ACCESS_DENIED"
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><authorise denied_srai="ACCESS_DENIED" role="root" /></template>', xml_str)
def test_node_exception_handling(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = MockTemplateAuthoriseNode()
node.role = "root"
self.assertIsNotNone(node)
self.assertEqual("root", node.role)
root.append(node)
self.assertEqual(len(root.children), 1)
with self.assertRaises(Exception):
node.resolve_to_string(self._client_context)
with self.assertRaises(Exception):
root.resolve(self._client_context)
| [
"[email protected]"
] | |
d92d88a90e3d3629c5262cff0521dabd3e43ceb5 | fbe3a52d2dd02bec18f7f52b31e357aed192a308 | /misc/begin/recursion/misc_lhm.py | 261358dc125ac9c701cf6739853038d988549f0f | [] | no_license | lherrada/python | 8fc5bd5ceb6038479fa6347dd6c0bd6e17f92e98 | d8260f35ba91b89590ef8e489188fb80ca1aed4e | refs/heads/master | 2022-10-29T06:23:17.297554 | 2022-09-24T15:45:59 | 2022-09-24T15:45:59 | 23,411,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | #!/usr/bin/python
#import misc1
#print misc1.fib2(30)
import fibo
print fibo.fib2(10)
fibo.printing()
| [
"[email protected]"
] | |
d6a017668b77161fc0092d339bbc5ee76cb9b2a8 | 29ed133feb870455ca619c9fa2ce9b7eb1dcc470 | /URIs/URI1930.py | 22e1944f07f5f32fae61f640c2b1136be3f4465e | [] | no_license | jrantunes/URIs-Python-3 | c5e676686a979b6bbfd10b8e7168a6d35fb8f6a2 | 4692f3fba4a1c9a0f51322a13e9e267d8b07ea3e | refs/heads/master | 2022-04-17T10:56:52.468275 | 2020-03-28T17:07:46 | 2020-03-28T17:07:46 | 250,395,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #Electrical Outlet
t1, t2, t3, t4 = input().split()
t1, t2, t3, t4 = int(t1) - 1, int(t2) - 1, int(t3) - 1, int(t4)
tomadas = [t1, t2, t3, t4]
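# Three of the strips are plugged into sockets along the chain, each
# spending one socket, while the fourth takes the wall outlet -- hence
# only t1..t3 are decremented: total = (t1-1) + (t2-1) + (t3-1) + t4.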
print(sum(tomadas)) | [
"[email protected]"
] | |
24c9821ee09e36a22850395bcbc3a104f1b923c9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/101584/submittedfiles/jogoDaVelha.py | 6a1f739064ae86cf4550efd44217ef6939a0aeb4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]')
nome = str(input('Qual seu nome? '))
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
while s != 'X' and s != 'O':
    print('Insira um símbolo válido')
    s = str(input('Qual símbolo você deseja utilizar no jogo? '))
if s == 'X':
print(sorteio(inicio))
print(tabuleiro) | [
"[email protected]"
] | |
1cb73c6568858279025a470e045c2fd95de4ee58 | 1d1f173d67a04b78f732aee99ef0e2d4e8284d63 | /dev/migrate_testing_phylesystem.py | 8df26db7e044c0beb132c03618620d7e68edd506 | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | rvosa/peyotl | 8767165ec85129c8f25c56a572f0bd879158aa2a | 98154af9832d18cbcb079f7e2db3b0e45893e1da | refs/heads/master | 2021-01-18T19:48:31.273061 | 2015-09-03T15:30:13 | 2015-09-03T15:30:13 | 41,867,598 | 0 | 0 | null | 2015-09-03T15:29:00 | 2015-09-03T15:29:00 | null | UTF-8 | Python | false | false | 4,445 | py | #!/usr/bin/env python
from peyotl.phylografter.nexson_workaround import workaround_phylografter_export_diffs, \
add_default_prop
from peyotl.phylesystem.git_actions import get_filepath_for_namespaced_id
from peyotl import get_logger
from subprocess import call
import codecs
import json
import sys
import os
import re
_LOG = get_logger(__name__)
def debug(m):
_LOG.debug(m)
old_phylesystem = sys.argv[1]
old_phylesystem_study = os.path.abspath(os.path.join(old_phylesystem, 'study'))
new_phylesystem = sys.argv[2]
new_phylesystem_study = os.path.abspath(os.path.join(new_phylesystem, 'study'))
scratch_par = sys.argv[3]
assert(os.path.isdir(old_phylesystem_study))
assert(os.path.isdir(new_phylesystem_study))
assert(os.path.isdir(scratch_par))
script_name = os.path.abspath(sys.argv[0])
peyotl_dev_dir = os.path.split(script_name)[0]
peyotl_dir = os.path.split(peyotl_dev_dir)[0]
conversion_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'nexson_nexml.py')
assert(os.path.isfile(conversion_script))
validation_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'validate_ot_nexson.py')
assert(os.path.isfile(conversion_script))
failed = []
pg_study_pat = re.compile(r'^\d+')
if len(sys.argv) > 4:
sl = sys.argv[4:]
else:
sl = os.listdir(old_phylesystem_study)
for f in sl:
if pg_study_pat.match(f):
source_study = f
dest_full = get_filepath_for_namespaced_id(new_phylesystem, f)
scratch_dir = os.path.join(scratch_par, f)
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
full_source = os.path.join(old_phylesystem_study, source_study, source_study + '.json')
dest_dir = os.path.split(dest_full)[0]
assert(os.path.exists(full_source))
if os.path.exists(dest_full):
debug('Skipping {} because output exists'.format(f))
continue
# read input and do the phylografter_workaround to valid 0.0.0 syntax
# store in scratch.
valid_bf = os.path.join(scratch_dir, 'v0.0.0-' + source_study + '.json')
debug('Raw phylografter from "{}" to valid 0.0.0 NexSON at "{}" ...'.format(full_source, valid_bf))
inp = codecs.open(full_source, mode='rU', encoding='utf-8')
obj = json.load(inp)
try:
workaround_phylografter_export_diffs(obj, valid_bf)
except:
_LOG.exception('Exception in workaround_phylografter_export_diffs for study ' + f)
failed.append(f)
continue
# Convert to 1.2.1
unchecked_hbf = os.path.join(scratch_dir, 'v1.2.1-' + source_study + '.json')
debug('Converting cleaned 0.0.0 NexSON from "{}" to unchecked 1.2.1 NexSON at "{}" ...'.format(valid_bf, unchecked_hbf))
invoc = [sys.executable,
conversion_script,
'-s',
'-e',
'1.2.1',
'-o',
unchecked_hbf,
valid_bf]
debug('invoc: "{}"'.format('" "'.join(invoc)))
rc = call(invoc)
if rc != 0:
failed.append(f)
else:
inp = codecs.open(unchecked_hbf, mode='rU', encoding='utf-8')
obj = json.load(inp)
aug_hbf = os.path.join(scratch_dir, 'augmentedv1.2.1-' + source_study + '.json')
add_default_prop(obj, aug_hbf)
# validate
annotation = os.path.join(scratch_dir, 'validation.json')
tmp = os.path.join(scratch_dir, 'final.json')
debug('Writing annotated version of "{}" to "{}" with annotations to "{}" ...'.format(
aug_hbf,
tmp,
annotation))
invoc = [sys.executable,
validation_script,
'--embed',
'--agent-only',
'-e',
annotation,
'-o',
tmp,
aug_hbf]
debug('invoc: "{}"'.format('" "'.join(invoc)))
rc = call(invoc)
if rc != 0:
failed.append(f)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
os.rename(tmp, dest_full)
if failed:
m = '\n '.join(failed)
sys.exit('Conversion of the following studies failed:\n {}'.format(m)) | [
"[email protected]"
] | |
b6fc79b993cd002a05a15ed4d474c68787c15613 | 1b9075ffea7d4b846d42981b41be44238c371202 | /2009/devel/programming/library/caps/actions.py | 518c5e326c2b2857a117363e060e5b3fc85eebcc | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pisitools
WorkDir = "ipfdevlib_linux"
def install():
pisitools.dobin("examples/i686/ipfinfo")
pisitools.insinto("/usr/include/caps", "include/caps/capsimage.h")
pisitools.dolib_so("lib/i686/libcapsimage.so.2.0")
pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so.2")
pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so")
pisitools.dodoc("HISTORY", "LICENSE", "README")
| [
"[email protected]"
] | |
caa1324452f60f4345f56ee55fdce418254174e2 | 32606415426b55b12f2c991a56a238a08a12baca | /ostrich/steps.py | 33875d59810c56f5b30a64136695a291a67a061e | [
"Apache-2.0"
] | permissive | shakenfist/ostrich | 773c6bc15b1a0b571fb80906d4e865238f349cf9 | f1a557c2dc19adf70d32152fbd07bc34676ee345 | refs/heads/master | 2021-07-21T23:26:28.448778 | 2017-07-18T01:24:36 | 2017-07-18T01:24:36 | 83,365,646 | 0 | 0 | Apache-2.0 | 2021-08-03T04:55:48 | 2017-02-27T22:57:05 | Python | UTF-8 | Python | false | false | 14,298 | py | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import fcntl
import json
import os
import psutil
import re
import select
import shutil
import subprocess
import sys
import time
import yaml
import emitters
import utils
def _handle_path_in_cwd(path, cwd):
if not cwd:
return path
if path.startswith('/'):
return path
return os.path.join(cwd, path)
class Step(object):
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs
self.depends = kwargs.get('depends', None)
self.attempts = 0
self.max_attempts = kwargs.get('max_attempts', 5)
self.failing_step_delay = kwargs.get('failing_step_delay', 30)
self.on_failure = kwargs.get('on_failure')
def __str__(self):
return 'step %s, depends on %s' % (self.name, self.depends)
def run(self, emit, screen):
if self.attempts > 0:
emit.emit('... not our first attempt, sleeping for %s seconds'
% self.failing_step_delay)
time.sleep(self.failing_step_delay)
self.attempts += 1
if self.attempts > self.max_attempts:
emit.emit('... repeatedly failed step, giving up')
sys.exit(1)
emit.emit('Running %s' % self)
emit.emit(' with kwargs: %s' % self.kwargs)
emit.emit('\n')
return self._run(emit, screen)
class KwargsStep(Step):
def __init__(self, name, r, kwarg_updates, **kwargs):
super(KwargsStep, self).__init__(name, **kwargs)
self.r = r
self.kwarg_updates = kwarg_updates
def run(self, emit, screen):
utils.recursive_dictionary_update(self.r.kwargs, self.kwarg_updates)
emit.emit(json.dumps(self.r.kwargs, indent=4, sort_keys=True))
return True
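# Usage sketch (names are illustrative): deep-merge overrides into the
# shared kwargs of a runner object `r` so later steps observe them, e.g.
#   KwargsStep('enable-osa-debug', runner, {'env': {'ANSIBLE_DEBUG': '1'}})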
class SimpleCommandStep(Step):
def __init__(self, name, command, **kwargs):
super(SimpleCommandStep, self).__init__(name, **kwargs)
self.command = command
self.cwd = kwargs.get('cwd')
self.trace_processes = kwargs.get('trace_processes', False)
self.env = os.environ
self.env.update(kwargs.get('env'))
self.acceptable_exit_codes = kwargs.get(
'acceptable_exit_codes', [0])
def _output_analysis(self, d):
pass
def _run(self, emit, screen):
emit.emit('# %s\n' % self.command)
obj = subprocess.Popen(self.command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
cwd=self.cwd,
env=self.env)
proc = psutil.Process(obj.pid)
procs = {}
flags = fcntl.fcntl(obj.stdout, fcntl.F_GETFL)
fcntl.fcntl(obj.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl.fcntl(obj.stderr, fcntl.F_GETFL)
fcntl.fcntl(obj.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)
obj.stdin.close()
while obj.poll() is None:
readable, _, _ = select.select([obj.stderr, obj.stdout], [], [], 1)
for f in readable:
d = os.read(f.fileno(), 10000)
self._output_analysis(d)
emit.emit(d)
seen = []
for child in proc.children(recursive=True):
try:
seen.append(child.pid)
if child.pid not in procs:
procs[child.pid] = ' '.join(child.cmdline())
if self.trace_processes:
emit.emit('*** process started *** %d -> %s'
% (child.pid, procs[child.pid]))
except psutil.NoSuchProcess:
pass
ended = []
for pid in procs:
if pid not in seen:
if self.trace_processes:
emit.emit('*** process ended *** %d -> %s'
% (pid, procs.get(child.pid, '???')))
ended.append(pid)
for pid in ended:
del procs[pid]
emit.emit('... process complete')
returncode = obj.returncode
emit.emit('... exit code %d' % returncode)
return returncode in self.acceptable_exit_codes
EXECUTION_RE = re.compile('^\[Executing "(.*)" playbook\]$')
RUN_TIME_RE = re.compile('^Run Time = ([0-9]+) seconds$')
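# Illustrative ansible output lines these patterns are meant to catch:
#   [Executing "setup-hosts" playbook]   -> playbook name captured
#   Run Time = 143 seconds               -> recorded as ('setup-hosts', '143')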
class AnsibleTimingSimpleCommandStep(SimpleCommandStep):
def __init__(self, name, command, timings_path, **kwargs):
super(AnsibleTimingSimpleCommandStep, self).__init__(
name, command, **kwargs)
self.playbook = None
self.timings = []
self.timings_path = timings_path
if os.path.exists(self.timings_path):
with open(self.timings_path, 'r') as f:
self.timings = json.loads(f.read())
def _output_analysis(self, d):
for line in d.split('\n'):
m = EXECUTION_RE.match(line)
if m:
self.playbook = m.group(1)
m = RUN_TIME_RE.match(line)
if m and self.playbook:
self.timings.append((self.playbook, m.group(1)))
def _run(self, emit, screen):
res = super(AnsibleTimingSimpleCommandStep, self)._run(emit, screen)
with open(self.timings_path, 'w') as f:
f.write(json.dumps(self.timings, indent=4))
return res
class PatchStep(SimpleCommandStep):
def __init__(self, name, **kwargs):
self.local_kwargs = copy.copy(kwargs)
self.local_kwargs['cwd'] = __file__.replace('/ostrich/steps.py', '')
self.local_kwargs['acceptable_exit_codes'] = [0, 1]
self.archive_path = os.path.expanduser('~/.ostrich')
self.files = []
with open(os.path.join(self.local_kwargs['cwd'],
'patches/%s' % name)) as f:
for line in f.readlines():
if line.startswith('--- '):
self.files.append(line.split()[1])
super(PatchStep, self).__init__(
name,
'patch -d / -p 1 --verbose < patches/%s' % name,
**self.local_kwargs)
def _archive_files(self, stage):
for f in self.files:
arc_path = os.path.join(self.archive_path,
'%s-%s-%s'
% (self.name, f.replace('/', '_'), stage))
if not os.path.exists(arc_path):
shutil.copyfile(f, arc_path)
def _run(self, emit, screen):
self._archive_files('before')
res = super(PatchStep, self)._run(emit, screen)
self._archive_files('after')
return res
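# Sketch (patch name hypothetical): PatchStep('fix-foo.patch', env={})
# applies patches/fix-foo.patch with `patch -d / -p 1` and archives every
# touched file under ~/.ostrich before and after the run.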
class QuestionStep(Step):
def __init__(self, name, title, helpful, prompt, **kwargs):
super(QuestionStep, self).__init__(name, **kwargs)
self.title = title
self.help = helpful
self.prompt = prompt
def _run(self, emit, screen):
emit.emit('%s' % self.title)
emit.emit('%s\n' % ('=' * len(self.title)))
emit.emit('%s\n' % self.help)
return emit.getstr('>> ')
class RegexpEditorStep(Step):
def __init__(self, name, path, search, replace, **kwargs):
super(RegexpEditorStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.search = search
self.replace = replace
def _run(self, emit, screen):
output = []
changes = 0
emit.emit('--- %s' % self.path)
emit.emit('+++ %s' % self.path)
with open(self.path, 'r') as f:
for line in f.readlines():
line = line.rstrip()
newline = re.sub(self.search, self.replace, line)
output.append(newline)
if newline != line:
emit.emit('- %s' % line)
emit.emit('+ %s' % newline)
changes += 1
else:
emit.emit(' %s' % line)
with open(self.path, 'w') as f:
f.write('\n'.join(output))
return 'Changed %d lines' % changes
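
# Usage sketch (path and patterns are hypothetical):
#   step = RegexpEditorStep('fix-port', '/etc/demo.conf',
#                           r'port=\d+', 'port=8080')
#   step.run(emitter, None)  # rewrites the file in place, emitting a diff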
class BulkRegexpEditorStep(Step):
def __init__(self, name, path, file_filter, replacements, **kwargs):
super(BulkRegexpEditorStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.file_filter = re.compile(file_filter)
self.replacements = replacements
def _run(self, emit, screen):
silent_emitter = emitters.NoopEmitter('noop', None)
changes = 0
for root, _, files in os.walk(self.path):
for filename in files:
m = self.file_filter.match(filename)
if not m:
continue
path = os.path.join(root, filename)
for (search, replace) in self.replacements:
s = RegexpEditorStep('bulk-edit', path, search, replace)
result = s.run(silent_emitter, None)
emit.emit('%s -> %s' % (path, result))
if result != 'Changed 0 lines':
changes += 1
return changes
class FileAppendStep(Step):
def __init__(self, name, path, text, **kwargs):
super(FileAppendStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.text = text
def _run(self, emit, screen):
if not os.path.exists(self.path):
emit.emit('%s does not exist' % self.path)
return False
with open(self.path, 'a+') as f:
f.write(self.text)
return True
class FileCreateStep(Step):
def __init__(self, name, path, text, **kwargs):
super(FileCreateStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.text = text
def _run(self, emit, screen):
if os.path.exists(self.path):
emit.emit('%s exists' % self.path)
return False
with open(self.path, 'w') as f:
f.write(self.text)
return True
class CopyFileStep(Step):
def __init__(self, name, from_path, to_path, **kwargs):
super(CopyFileStep, self).__init__(name, **kwargs)
self.from_path = _handle_path_in_cwd(from_path, kwargs.get('cwd'))
self.to_path = _handle_path_in_cwd(to_path, kwargs.get('cwd'))
def _run(self, emit, screen):
shutil.copyfile(self.from_path, self.to_path)
return True
class YamlAddElementStep(Step):
def __init__(self, name, path, target_element_path, data, **kwargs):
super(YamlAddElementStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.target_element_path = target_element_path
self.data = data
def _run(self, emit, screen):
with open(self.path) as f:
y = yaml.load(f.read())
sub = y
for key in self.target_element_path:
sub = sub[key]
sub.append(self.data)
emit.emit('YAML after changes:')
emit.emit(yaml.dump(y))
with open(self.path, 'w') as f:
f.write(yaml.dump(y, default_flow_style=False))
return True
class YamlUpdateElementStep(Step):
def __init__(self, name, path, target_element_path, target_key, data,
**kwargs):
super(YamlUpdateElementStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.target_element_path = target_element_path
self.target_key = target_key
self.data = data
def _run(self, emit, screen):
with open(self.path) as f:
y = yaml.load(f.read())
sub = y
for key in self.target_element_path:
sub = sub[key]
sub[self.target_key] = self.data
emit.emit('YAML after changes:')
emit.emit(yaml.dump(y))
with open(self.path, 'w') as f:
f.write(yaml.dump(y, default_flow_style=False))
return True
class YamlDeleteElementStep(Step):
def __init__(self, name, path, target_element_path, index, **kwargs):
super(YamlDeleteElementStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.target_element_path = target_element_path
self.index = index
def _run(self, emit, screen):
with open(self.path) as f:
y = yaml.load(f.read())
sub = y
for key in self.target_element_path:
sub = sub[key]
del sub[self.index]
emit.emit('YAML after changes:')
emit.emit(yaml.dump(y))
with open(self.path, 'w') as f:
f.write(yaml.dump(y, default_flow_style=False))
return True
class YamlUpdateDictionaryStep(Step):
def __init__(self, name, path, target_element_path, data, **kwargs):
super(YamlUpdateDictionaryStep, self).__init__(name, **kwargs)
self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
self.target_element_path = target_element_path
self.data = data
def _run(self, emit, screen):
with open(self.path) as f:
y = yaml.load(f.read())
sub = y
for key in self.target_element_path:
sub = sub[key]
sub.update(self.data)
emit.emit('YAML after changes:')
emit.emit(yaml.dump(y))
with open(self.path, 'w') as f:
f.write(yaml.dump(y, default_flow_style=False))
return True
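# Illustrative sketch for the YAML steps above (file name and contents are
# assumed): given config.yml containing
#   services:
#     web:
#       image: app:1.0
# YamlUpdateDictionaryStep('bump', 'config.yml',
#                          target_element_path=['services', 'web'],
#                          data={'image': 'app:1.1', 'replicas': 2})
# loads the file, walks target_element_path down to the nested dict, merges
# `data` into it with dict.update(), and rewrites the file in block style.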
| [
"[email protected]"
] | |
c0a560dc7b728ab8f5e2bb57b87cb1e63a75ab05 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /python/HERWIGPP_POWHEG_GluonFusion_H1200_bbbar_8TeV_cff.py | 69a444d3216d8ed1263402b24a90ec1ffe8bbca7 | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.HerwigppDefaults_cfi import *
generator = cms.EDFilter(
"ThePEGGeneratorFilter",
herwigDefaultsBlock,
configFiles = cms.vstring(),
parameterSets = cms.vstring(
'cm8TeV',
'powhegNewDefaults',
'GluonFusionHbbParameters',
'basicSetup',
'setParticlesStableForDetector',
),
powhegNewDefaults = cms.vstring(
'# Need to use an NLO PDF',
'# and strong coupling',
'cp /Herwig/Partons/MRST-NLO /Herwig/Partons/cmsPDFSet',
'create Herwig::O2AlphaS O2AlphaS',
'set /Herwig/Generators/LHCGenerator:StandardModelParameters:QCD/RunningAlphaS O2AlphaS',
'# Setup the POWHEG shower',
'cd /Herwig/Shower',
'set Evolver:HardEmissionMode POWHEG',
'# higgs + W (N.B. if considering all W decay modes useful to set )',
'# (jet pT cut to zero so no cut on W decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2WH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# higgs + Z (N.B. if considering all Z decay modes useful to set )',
'# (jet pT cut to zero so no cut on Z decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2ZH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# gg/qqbar -> Higgs',
'# insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'# Weak boson pair production: WW / ZZ / WZ / W+Z [WpZ] / W-Z [WmZ]',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2VV',
'# set PowhegMEPP2VV:Process WpZ'
),
pdfCTEQ6M = cms.vstring(
'mkdir /LHAPDF',
'cd /LHAPDF',
'create ThePEG::LHAPDF CTEQ6M',
'set CTEQ6M:PDFName cteq6mE.LHgrid',
'set CTEQ6M:RemnantHandler /Herwig/Partons/HadronRemnants',
'cp CTEQ6M /cmsPDFSet',
'cd /'
),
GluonFusionHbbParameters = cms.vstring(
'cd /Herwig/MatrixElements/',
'insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'set /Herwig/Particles/h0:NominalMass 1200.*GeV',
'set /Herwig/Particles/h0/h0->b,bbar;:OnOff On',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 0.7195',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 1.0000',
'set /Herwig/Particles/h0/h0->W+,W-;:OnOff Off',
'set /Herwig/Particles/h0/h0->tau-,tau+;:OnOff Off',
'set /Herwig/Particles/h0/h0->g,g;:OnOff Off',
'set /Herwig/Particles/h0/h0->c,cbar;:OnOff Off',
'set /Herwig/Particles/h0/h0->Z0,Z0;:OnOff Off',
'set /Herwig/Particles/h0/h0->gamma,gamma;:OnOff Off',
'set /Herwig/Particles/h0/h0->mu-,mu+;:OnOff Off',
'set /Herwig/Particles/h0/h0->t,tbar;:OnOff Off'
),
#crossSection = cms.untracked.double(0.1665),
#filterEfficiency = cms.untracked.double(1.0)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision: 1.3 $'),
name = cms.untracked.string('\$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/EightTeV/HERWIGPP_POWHEG_H125_bbbar_W_lnu_8TeV_cff.py,v $'),
annotation = cms.untracked.string('HERWIGPP/POWHEG: (H->bb)(W->lnu), m(H)=125 GeV, l=e or mu or tau')
)
| [
"[email protected]"
] | |
b614ca7ed169de8fd6bc9ceab1f35f66a6ecbd4e | 3bb70650b4b83e4653dcc18c8233c106c7a5611a | /receipt/models.py | a0249ce6f92b8a79eb8446196a76c3d5c9ab0a32 | [] | no_license | khanhlu2013/pos_connect_code | 48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef | fdf70de858c10b175832af31ecc0cf770d028396 | refs/heads/master | 2023-04-08T02:35:46.181265 | 2016-10-18T21:12:51 | 2016-10-18T21:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | from django.db import models
from store.models import Store
from store_product.models import Store_product
import decimal
from payment_type.models import Payment_type
class Receipt(models.Model):
date = models.DateTimeField()
tax_rate = models.DecimalField(max_digits=6, decimal_places=4)
store = models.ForeignKey(Store)
_receipt_doc_id = models.CharField(max_length=40,unique=True)#this field is the receipt doc id from couch.as an optimization to save sale data to master. we bulk create models.Receipt and need this link to document.Receipt to bulk insert models.Receipt_ln
def __unicode__(self):
return str(self.id)
class Tender_ln(models.Model):
receipt = models.ForeignKey(Receipt,related_name="tender_ln_lst")
payment_type = models.ForeignKey(Payment_type,blank=True,null=True)
amount = models.DecimalField(max_digits=6, decimal_places=2)
name = models.CharField(blank=True,null=True,max_length=100)
class Receipt_ln(models.Model):
receipt = models.ForeignKey(Receipt,related_name="receipt_ln_lst")
qty = models.IntegerField()
discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
override_price = models.DecimalField(max_digits=6, decimal_places=3,blank=True,null=True)
date = models.DateTimeField()
store_product = models.ForeignKey(Store_product,blank=True,null=True)
sp_stamp_name = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_value_customer_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_is_taxable = models.NullBooleanField(blank=True,null=True)
sp_stamp_is_sale_report = models.NullBooleanField(blank=True,null=True)
sp_stamp_p_type = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_p_tag = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_vendor = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_buydown = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
mm_deal_discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
mm_deal_name = models.CharField(max_length=100,blank=True,null=True)
non_inventory_name = models.CharField(max_length=100,blank=True,null=True)
non_inventory_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
non_inventory_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
non_inventory_is_taxable = models.NullBooleanField(blank=True,null=True)
non_inventory_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True) | [
"[email protected]"
] | |
0ce8d4ae15eba8e000cbe459928f96dd85b9f175 | 9e5eca27222871dd04e42c9106bb2fba07e598ff | /src/osxification/foundation/ns_string.py | 4305958fd9999299dcc1df4b9c9d9d5641838191 | [] | no_license | jepebe/osxification | b2a68dec07cd0be3b7ebd519bd99d0bbd51e61c7 | c9a539f4dbeda9200e32a2eea2c955dd94e6f45e | refs/heads/master | 2016-09-03T06:35:41.659315 | 2015-05-19T18:00:23 | 2015-05-19T18:00:23 | 35,567,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | from osxification.foundation import NSStringEncoding, NSObject
class NSString(NSObject):
def __init__(self, content, encoding=None):
if isinstance(content, str):
encoding = NSStringEncoding.NSUTF8StringEncoding
# elif isinstance(content, unicode):
# encoding = NSStringEncoding.NSUnicodeStringEncoding
else:
raise UserWarning("[%s] Error: 'content' should be a string, received: %s" % (self.__class__.__name__, type(content)))
identifier = NSString._init(self.alloc(), content, encoding)
super(NSString, self).__init__(identifier)
def __str__(self):
return self._asCString(NSStringEncoding.NSUTF8StringEncoding)
# def __unicode__(self):
# return self._asCString(NSStringEncoding.NSUnicodeStringEncoding)
def __int__(self):
return self._intValue()
def __float__(self):
return self._floatValue()
def __eq__(self, other):
return self._isEqualToString(other)
@classmethod
def from_param(cls, instance):
if isinstance(instance, str):
instance = NSString(instance)
return NSObject.from_param(instance)
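# Illustrative usage (assumes the Objective-C runtime bridge from this package
# is available; values are made up):
#   s = NSString("42")
#   str(s)    # -> "42"  via cStringUsingEncoding:
#   int(s)    # -> 42    via integerValue
#   float(s)  # -> 42.0  via doubleValue
#   s == "42" # -> True  via isEqualToString: (from_param coerces the str)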
NSString._init = NSString.bindMethodToClass("initWithCString:encoding:")
NSString._asCString = NSString.bindMethodToClass("cStringUsingEncoding:", returns=str)
NSString._intValue = NSString.bindMethodToClass("integerValue", returns=int)
NSString._floatValue = NSString.bindMethodToClass("doubleValue", returns=float)
NSString._isEqualToString = NSString.bindMethodToClass("isEqualToString:", parameters=[NSString], returns=bool) | [
"[email protected]"
] | |
bb4e08299b87e0d44389027cb157b9ba193b8b62 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/250/32382/submittedfiles/swamee.py | 65a4d92fd968b2221e8050cbd8814d6ae8e3c0f0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
import math
#START YOUR CODE HERE
f=float(input('enter f:'))
l=float(input('enter l:'))
q=float(input('enter q:'))
deltah=float(input('enter delta:'))
v=float(input('enter v:'))
g=9.81
e=0.000002
D=(((8*f*l*q*q)/((math.pi**2)*(g*deltah)))**(1.0/5))
print('the value of D is:%.4f'%D)
rey=((4*q)/(math.pi*D*v))
print('the value of rey is:%.4f'%rey)
k=0.25/((math.log10((e/(3.7*D))+(5.74/(rey**0.9))))**2)
print('the value of k is:%.4f'%k)
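# Relations used above: Darcy-Weisbach sizing D = (8*f*L*Q**2/(pi**2*g*dh))**(1/5),
# Reynolds number Re = 4*Q/(pi*D*v), and the Swamee-Jain friction factor
# f = 0.25/log10(e/(3.7*D) + 5.74/Re**0.9)**2.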
| [
"[email protected]"
] | |
a4a1a15fcd715bdc69965843f94c3b2f571c20b3 | 30227ff573bcec32644fca1cca42ef4cdd612c3e | /leetcode/linkedList/singly_linked_list/remove_node_a1.py | 8ee8af6cdfb55f3965cc1e1c627c57e7e5e85560 | [] | no_license | saurabh-pandey/AlgoAndDS | bc55864422c93e6c93b8432e483394f286ce8ef2 | dad11dedea9ceb4904d6c2dea801ce0172abfc81 | refs/heads/master | 2023-07-01T09:12:57.951949 | 2023-06-15T12:16:36 | 2023-06-15T12:16:36 | 88,239,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #URL: https://leetcode.com/explore/learn/card/linked-list/219/classic-problems/1207/
# Description
"""
Given the head of a linked list and an integer val, remove all the nodes of the linked list that
has Node.val == val, and return the new head.
Example 1:
Input: head = [1,2,6,3,4,5,6], val = 6
Output: [1,2,3,4,5]
Example 2:
Input: head = [], val = 1
Output: []
Example 3:
Input: head = [7,7,7,7], val = 7
Output: []
Constraints:
The number of nodes in the list is in the range [0, 104].
1 <= Node.val <= 50
0 <= val <= 50
"""
def removeElements(head, val):
if head is None:
return
newHead = head
prevNode = head
currNode = head
while currNode is not None:
if currNode._val == val:
if currNode is newHead:
newHead = currNode._next
else:
prevNode._next = currNode._next
else:
# Only move previous node if current node is not the one to be deleted. Previous node should
# always point to something that is going to be part of the list
prevNode = currNode
currNode = currNode._next
return newHead | [
"[email protected]"
] | |
cd8aecca91fd152be1487734efe54d582598aa3d | ab47546a5fbf086193130b66a0ac8d849aa75d23 | /venv/bin/pip3.7 | aa3156ab60ab683ac043913e5b5ec19d31981c22 | [] | no_license | icerahi/ecommerce-webapp | 1f7b7a29d78d73ab03baa6aeeb69a4c95e042090 | eb44e9c1f5b8444a72b3aaf5fcd96f30aa4757c9 | refs/heads/master | 2020-05-01T06:30:05.477773 | 2019-03-24T08:11:38 | 2019-03-24T08:11:38 | 177,331,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | 7 | #!/home/rahi/PycharmProjects/E-commerce/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
ddf2a1f0cc0195a7ea1195d4200196e3b871e4be | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/ModuloTkinter/Aula001HelloWorld.py | 32a092bff66a7ed68b5bc07212de60aeb6f607e8 | [
"MIT"
] | permissive | DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from tkinter import *
# Always start a program with the root and the Tk class
root = Tk()
# Creating the first widget, in this case a label attached to root with the text "Hello World!"
myLabel = Label(root, text="Hello World!")
# Positioning the created label
myLabel.pack()
# Every program runs through a loop, and with the mainloop attribute we define that the program stops running from this line of code on. In other words, the lines of code that come after mainloop are executed only after the GUI is closed
root.mainloop()
| [
"[email protected]"
] | |
a90cd4d5bf5d588410d769c97cfa33f4a39619c4 | d0eb9e95c796042067aceaf0fc3d43f56d4eb87b | /Tests/PootyTests.py | a75560eb9533c2d64a49521fcaed266ae119f381 | [] | no_license | finneyjm/RynLib | ea0fd0f8ccd21fdac4663d5fb2b6836efce49a10 | 42e7d07ff879f72ae163f682cb07ba7489ce0a06 | refs/heads/master | 2021-02-16T15:30:02.181769 | 2020-03-05T19:20:22 | 2020-03-05T19:20:22 | 245,020,012 | 0 | 0 | null | 2020-03-04T22:47:09 | 2020-03-04T22:47:08 | null | UTF-8 | Python | false | false | 1,094 | py | from Peeves.TestUtils import *
from unittest import TestCase
from PootyAndTheBlowfish.Templator import *
from PootyAndTheBlowfish.PotentialTemplator import PotentialTemplate
import sys
class PootyTests(TestCase):
@inactiveTest
def test_ApplyBaseTemplate(self):
import os
curdir = os.getcwd()
template = os.path.join(curdir, "RynLib", "PootyAndTheBlowfish", "Templates", "PotentialTemplate")
writer = TemplateWriter(template, LibName = "ploot")
out = os.path.expanduser("~/Desktop")
writer.iterate_write(out)
worked = os.path.exists(os.path.join(out, "plootPot", "src", "CMakeLists.txt"))
self.assertTrue(worked)
@inactiveTest
def test_SimplePotential(self):
import os
writer = PotentialTemplate(
lib_name = "DumbPot",
function_name = "DumbPot",
linked_libs = [ "DumbPot" ],
potential_source = TestManager.test_data("DumbPot"),
requires_make = True
)
out = os.path.expanduser("~/Desktop")
writer.apply(out)
| [
"[email protected]"
] | |
20dafa7d284b657578456c3c93e1fdde8a345ed6 | 5817da2441abfe5974cd31f323caaa00958e26f5 | /dekanat/migrations/0002_news_newsimage.py | aa0501d98902d0f66c90062049b5e6076b935ab5 | [] | no_license | katalyzator/Dekanat | 3923f05eefb120fb366db2658e418ea9d87272f3 | af32167bc78ca6ed52ad5910f6bfc8dfa3f91469 | refs/heads/master | 2021-01-12T15:05:09.618166 | 2016-10-23T08:51:11 | 2016-10-23T08:51:11 | 71,691,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-18 02:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dekanat', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='\u0417\u0430\u0433\u043e\u043b\u043e\u0432\u043e\u043a \u043f\u043e\u0441\u0442\u0430')),
('description', models.CharField(max_length=1000, verbose_name='\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u043f\u043e\u0441\u0442\u0430')),
('text', models.TextField(verbose_name='\u0422\u0435\u043a\u0441\u0442 \u043f\u043e\u0441\u0442\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
migrations.CreateModel(
name='NewsImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='', verbose_name='\u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dekanat.News', verbose_name='\u0432\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u044c')),
],
options={
'verbose_name': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
]
| [
"[email protected]"
] | |
ac89a3e772ac4651679f9bbcd3d7859f8433465b | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /python/ray/serve/controller.py | 3176a6321bab846bb8ad713f732a0067362fc473 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 41,020 | py | import asyncio
from collections import defaultdict
from itertools import chain
import os
import random
import time
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional, Tuple
from pydantic import BaseModel
import ray
import ray.cloudpickle as pickle
from ray.serve.autoscaling_policy import BasicAutoscalingPolicy
from ray.serve.backend_worker import create_backend_replica
from ray.serve.constants import ASYNC_CONCURRENCY, SERVE_PROXY_NAME
from ray.serve.http_proxy import HTTPProxyActor
from ray.serve.kv_store import RayInternalKVStore
from ray.serve.exceptions import RayServeException
from ray.serve.utils import (format_actor_name, get_random_letters, logger,
try_schedule_resources_on_nodes, get_all_node_ids)
from ray.serve.config import BackendConfig, ReplicaConfig
from ray.serve.long_poll import LongPollerHost
from ray.actor import ActorHandle
import numpy as np
# Used for testing purposes only. If this is set, the controller will crash
# after writing each checkpoint with the specified probability.
_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0
CHECKPOINT_KEY = "serve-controller-checkpoint"
# Feature flag for controller resource checking. If true, controller will
# error if the desired replicas exceed current resource availability.
_RESOURCE_CHECK_ENABLED = True
# How often to call the control loop on the controller.
CONTROL_LOOP_PERIOD_S = 1.0
REPLICA_STARTUP_TIME_WARNING_S = 5
# TypeDefs
BackendTag = str
EndpointTag = str
ReplicaTag = str
NodeId = str
GoalId = int
class TrafficPolicy:
def __init__(self, traffic_dict: Dict[str, float]) -> None:
self.traffic_dict: Dict[str, float] = dict()
self.shadow_dict: Dict[str, float] = dict()
self.set_traffic_dict(traffic_dict)
def set_traffic_dict(self, traffic_dict: Dict[str, float]) -> None:
prob = 0
for backend, weight in traffic_dict.items():
if weight < 0:
raise ValueError(
"Attempted to assign a weight of {} to backend '{}'. "
"Weights cannot be negative.".format(weight, backend))
prob += weight
# These weights will later be plugged into np.random.choice, which
# uses a tolerance of 1e-8.
if not np.isclose(prob, 1, atol=1e-8):
raise ValueError("Traffic dictionary weights must sum to 1, "
"currently they sum to {}".format(prob))
self.traffic_dict = traffic_dict
def set_shadow(self, backend: str, proportion: float):
if proportion == 0 and backend in self.shadow_dict:
del self.shadow_dict[backend]
else:
self.shadow_dict[backend] = proportion
def __repr__(self) -> str:
return f"<Traffic {self.traffic_dict}; Shadow {self.shadow_dict}>"
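# Example of the weight contract enforced above (illustrative values):
#   policy = TrafficPolicy({"v1": 0.9, "v2": 0.1})  # weights must sum to 1
#   policy.set_shadow("canary", 0.05)               # shadow entries are kept
#   policy.set_shadow("canary", 0)                  # separately; 0 removes one
# Negative weights, or a sum outside 1 +/- 1e-8, raise ValueError in
# set_traffic_dict().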
class BackendInfo(BaseModel):
# TODO(architkulkarni): Add type hint for worker_class after upgrading
# cloudpickle and adding types to RayServeWrappedReplica
worker_class: Any
backend_config: BackendConfig
replica_config: ReplicaConfig
class Config:
# TODO(architkulkarni): Remove once ReplicaConfig is a pydantic
# model
arbitrary_types_allowed = True
@dataclass
class SystemState:
backends: Dict[BackendTag, BackendInfo] = field(default_factory=dict)
traffic_policies: Dict[EndpointTag, TrafficPolicy] = field(
default_factory=dict)
routes: Dict[BackendTag, Tuple[EndpointTag, Any]] = field(
default_factory=dict)
backend_goal_ids: Dict[BackendTag, GoalId] = field(default_factory=dict)
traffic_goal_ids: Dict[EndpointTag, GoalId] = field(default_factory=dict)
route_goal_ids: Dict[BackendTag, GoalId] = field(default_factory=dict)
def get_backend_configs(self) -> Dict[BackendTag, BackendConfig]:
return {
tag: info.backend_config
for tag, info in self.backends.items()
}
def get_backend(self, backend_tag: BackendTag) -> Optional[BackendInfo]:
return self.backends.get(backend_tag)
def add_backend(self,
backend_tag: BackendTag,
backend_info: BackendInfo,
goal_id: GoalId = 0) -> None:
self.backends[backend_tag] = backend_info
        self.backend_goal_ids[backend_tag] = goal_id
def get_endpoints(self) -> Dict[EndpointTag, Dict[str, Any]]:
endpoints = {}
for route, (endpoint, methods) in self.routes.items():
if endpoint in self.traffic_policies:
traffic_policy = self.traffic_policies[endpoint]
traffic_dict = traffic_policy.traffic_dict
shadow_dict = traffic_policy.shadow_dict
else:
traffic_dict = {}
shadow_dict = {}
endpoints[endpoint] = {
"route": route if route.startswith("/") else None,
"methods": methods,
"traffic": traffic_dict,
"shadows": shadow_dict,
}
return endpoints
@dataclass
class ActorStateReconciler:
controller_name: str = field(init=True)
detached: bool = field(init=True)
routers_cache: Dict[NodeId, ActorHandle] = field(default_factory=dict)
backend_replicas: Dict[BackendTag, Dict[ReplicaTag, ActorHandle]] = field(
default_factory=lambda: defaultdict(dict))
backend_replicas_to_start: Dict[BackendTag, List[ReplicaTag]] = field(
default_factory=lambda: defaultdict(list))
backend_replicas_to_stop: Dict[BackendTag, List[ReplicaTag]] = field(
default_factory=lambda: defaultdict(list))
backends_to_remove: List[BackendTag] = field(default_factory=list)
endpoints_to_remove: List[EndpointTag] = field(default_factory=list)
# TODO(edoakes): consider removing this and just using the names.
def router_handles(self) -> List[ActorHandle]:
return list(self.routers_cache.values())
def get_replica_handles(self) -> List[ActorHandle]:
return list(
chain.from_iterable([
replica_dict.values()
for replica_dict in self.backend_replicas.values()
]))
def get_replica_tags(self) -> List[ReplicaTag]:
return list(
chain.from_iterable([
replica_dict.keys()
for replica_dict in self.backend_replicas.values()
]))
async def _start_pending_backend_replicas(
self, current_state: SystemState) -> None:
"""Starts the pending backend replicas in self.backend_replicas_to_start.
Waits for replicas to start up, then removes them from
self.backend_replicas_to_start.
"""
fut_to_replica_info = {}
for backend_tag, replicas_to_create in self.backend_replicas_to_start.\
items():
for replica_tag in replicas_to_create:
replica_handle = await self._start_backend_replica(
current_state, backend_tag, replica_tag)
ready_future = replica_handle.ready.remote().as_future()
fut_to_replica_info[ready_future] = (backend_tag, replica_tag,
replica_handle)
start = time.time()
prev_warning = start
while fut_to_replica_info:
if time.time() - prev_warning > REPLICA_STARTUP_TIME_WARNING_S:
prev_warning = time.time()
logger.warning("Waited {:.2f}s for replicas to start up. Make "
"sure there are enough resources to create the "
"replicas.".format(time.time() - start))
done, pending = await asyncio.wait(
list(fut_to_replica_info.keys()), timeout=1)
for fut in done:
(backend_tag, replica_tag,
replica_handle) = fut_to_replica_info.pop(fut)
self.backend_replicas[backend_tag][
replica_tag] = replica_handle
self.backend_replicas_to_start.clear()
async def _start_backend_replica(self, current_state: SystemState,
backend_tag: BackendTag,
replica_tag: ReplicaTag) -> ActorHandle:
"""Start a replica and return its actor handle.
Checks if the named actor already exists before starting a new one.
Assumes that the backend configuration is already in the Goal State.
"""
# NOTE(edoakes): the replicas may already be created if we
# failed after creating them but before writing a
# checkpoint.
replica_name = format_actor_name(replica_tag, self.controller_name)
try:
replica_handle = ray.get_actor(replica_name)
except ValueError:
logger.debug("Starting replica '{}' for backend '{}'.".format(
replica_tag, backend_tag))
backend_info = current_state.get_backend(backend_tag)
replica_handle = ray.remote(backend_info.worker_class).options(
name=replica_name,
lifetime="detached" if self.detached else None,
max_restarts=-1,
max_task_retries=-1,
**backend_info.replica_config.ray_actor_options).remote(
backend_tag, replica_tag,
backend_info.replica_config.actor_init_args,
backend_info.backend_config, self.controller_name)
return replica_handle
def _scale_backend_replicas(self, backends: Dict[BackendTag, BackendInfo],
backend_tag: BackendTag,
num_replicas: int) -> None:
"""Scale the given backend to the number of replicas.
NOTE: this does not actually start or stop the replicas, but instead
adds the intention to start/stop them to self.backend_replicas_to_start
and self.backend_replicas_to_stop. The caller is responsible for then
first writing a checkpoint and then actually starting/stopping the
intended replicas. This avoids inconsistencies with starting/stopping a
replica and then crashing before writing a checkpoint.
"""
logger.debug("Scaling backend '{}' to {} replicas".format(
backend_tag, num_replicas))
assert (backend_tag in backends
), "Backend {} is not registered.".format(backend_tag)
assert num_replicas >= 0, ("Number of replicas must be"
" greater than or equal to 0.")
current_num_replicas = len(self.backend_replicas[backend_tag])
delta_num_replicas = num_replicas - current_num_replicas
backend_info = backends[backend_tag]
if delta_num_replicas > 0:
can_schedule = try_schedule_resources_on_nodes(requirements=[
backend_info.replica_config.resource_dict
for _ in range(delta_num_replicas)
])
if _RESOURCE_CHECK_ENABLED and not all(can_schedule):
num_possible = sum(can_schedule)
raise RayServeException(
"Cannot scale backend {} to {} replicas. Ray Serve tried "
"to add {} replicas but the resources only allows {} "
"to be added. To fix this, consider scaling to replica to "
"{} or add more resources to the cluster. You can check "
"avaiable resources with ray.nodes().".format(
backend_tag, num_replicas, delta_num_replicas,
num_possible, current_num_replicas + num_possible))
logger.debug("Adding {} replicas to backend {}".format(
delta_num_replicas, backend_tag))
for _ in range(delta_num_replicas):
replica_tag = "{}#{}".format(backend_tag, get_random_letters())
self.backend_replicas_to_start[backend_tag].append(replica_tag)
elif delta_num_replicas < 0:
logger.debug("Removing {} replicas from backend '{}'".format(
-delta_num_replicas, backend_tag))
            assert len(
                self.backend_replicas[backend_tag]) >= -delta_num_replicas
for _ in range(-delta_num_replicas):
replica_tag, _ = self.backend_replicas[backend_tag].popitem()
if len(self.backend_replicas[backend_tag]) == 0:
del self.backend_replicas[backend_tag]
self.backend_replicas_to_stop[backend_tag].append(replica_tag)
async def _stop_pending_backend_replicas(self) -> None:
"""Stops the pending backend replicas in self.backend_replicas_to_stop.
Removes backend_replicas from the router, kills them, and clears
self.backend_replicas_to_stop.
"""
for backend_tag, replicas_list in self.backend_replicas_to_stop.items(
):
for replica_tag in replicas_list:
# NOTE(edoakes): the replicas may already be stopped if we
# failed after stopping them but before writing a checkpoint.
replica_name = format_actor_name(replica_tag,
self.controller_name)
try:
replica = ray.get_actor(replica_name)
except ValueError:
continue
# TODO(edoakes): this logic isn't ideal because there may be
# pending tasks still executing on the replica. However, if we
# use replica.__ray_terminate__, we may send it while the
# replica is being restarted and there's no way to tell if it
# successfully killed the worker or not.
ray.kill(replica, no_restart=True)
self.backend_replicas_to_stop.clear()
def _start_routers_if_needed(self, http_host: str, http_port: str,
http_middlewares: List[Any]) -> None:
"""Start a router on every node if it doesn't already exist."""
if http_host is None:
return
for node_id, node_resource in get_all_node_ids():
if node_id in self.routers_cache:
continue
router_name = format_actor_name(SERVE_PROXY_NAME,
self.controller_name, node_id)
try:
router = ray.get_actor(router_name)
except ValueError:
logger.info("Starting router with name '{}' on node '{}' "
"listening on '{}:{}'".format(
router_name, node_id, http_host, http_port))
router = HTTPProxyActor.options(
name=router_name,
lifetime="detached" if self.detached else None,
max_concurrency=ASYNC_CONCURRENCY,
max_restarts=-1,
max_task_retries=-1,
resources={
node_resource: 0.01
},
).remote(
http_host,
http_port,
controller_name=self.controller_name,
http_middlewares=http_middlewares)
self.routers_cache[node_id] = router
def _stop_routers_if_needed(self) -> bool:
"""Removes router actors from any nodes that no longer exist.
Returns whether or not any actors were removed (a checkpoint should
be taken).
"""
actor_stopped = False
all_node_ids = {node_id for node_id, _ in get_all_node_ids()}
to_stop = []
for node_id in self.routers_cache:
if node_id not in all_node_ids:
logger.info(
"Removing router on removed node '{}'.".format(node_id))
to_stop.append(node_id)
for node_id in to_stop:
router_handle = self.routers_cache.pop(node_id)
ray.kill(router_handle, no_restart=True)
actor_stopped = True
return actor_stopped
def _recover_actor_handles(self) -> None:
# Refresh the RouterCache
for node_id in self.routers_cache.keys():
router_name = format_actor_name(SERVE_PROXY_NAME,
self.controller_name, node_id)
self.routers_cache[node_id] = ray.get_actor(router_name)
# Fetch actor handles for all of the backend replicas in the system.
# All of these backend_replicas are guaranteed to already exist because
# they would not be written to a checkpoint in self.backend_replicas
# until they were created.
for backend_tag, replica_dict in self.backend_replicas.items():
for replica_tag in replica_dict.keys():
replica_name = format_actor_name(replica_tag,
self.controller_name)
self.backend_replicas[backend_tag][
replica_tag] = ray.get_actor(replica_name)
async def _recover_from_checkpoint(
self, current_state: SystemState, controller: "ServeController"
) -> Dict[BackendTag, BasicAutoscalingPolicy]:
self._recover_actor_handles()
autoscaling_policies = dict()
for backend, info in current_state.backends.items():
metadata = info.backend_config.internal_metadata
if metadata.autoscaling_config is not None:
autoscaling_policies[backend] = BasicAutoscalingPolicy(
backend, metadata.autoscaling_config)
# Start/stop any pending backend replicas.
await self._start_pending_backend_replicas(current_state)
await self._stop_pending_backend_replicas()
return autoscaling_policies
@dataclass
class Checkpoint:
goal_state: SystemState
current_state: SystemState
reconciler: ActorStateReconciler
# TODO(ilr) Rename reconciler to PendingState
@ray.remote
class ServeController:
"""Responsible for managing the state of the serving system.
The controller implements fault tolerance by persisting its state in
a new checkpoint each time a state change is made. If the actor crashes,
the latest checkpoint is loaded and the state is recovered. Checkpoints
are written/read using a provided KV-store interface.
All hard state in the system is maintained by this actor and persisted via
these checkpoints. Soft state required by other components is fetched by
those actors from this actor on startup and updates are pushed out from
this actor.
All other actors started by the controller are named, detached actors
so they will not fate share with the controller if it crashes.
The following guarantees are provided for state-changing calls to the
controller:
- If the call succeeds, the change was made and will be reflected in
the system even if the controller or other actors die unexpectedly.
- If the call fails, the change may have been made but isn't guaranteed
to have been. The client should retry in this case. Note that this
requires all implementations here to be idempotent.
"""
async def __init__(self,
controller_name: str,
http_host: str,
http_port: str,
http_middlewares: List[Any],
detached: bool = False):
        self.detached = detached
        # Used to read/write checkpoints.
        self.kv_store = RayInternalKVStore(namespace=controller_name)
# Current State
self.current_state = SystemState()
# Goal State
# TODO(ilr) This is currently *unused* until the refactor of the serve
# controller.
self.goal_state = SystemState()
# ActorStateReconciler
self.actor_reconciler = ActorStateReconciler(controller_name, detached)
# backend -> AutoscalingPolicy
self.autoscaling_policies = dict()
# Dictionary of backend_tag -> router_name -> most recent queue length.
self.backend_stats = defaultdict(lambda: defaultdict(dict))
# Used to ensure that only a single state-changing operation happens
# at any given time.
self.write_lock = asyncio.Lock()
self.http_host = http_host
self.http_port = http_port
self.http_middlewares = http_middlewares
# If starting the actor for the first time, starts up the other system
# components. If recovering, fetches their actor handles.
self.actor_reconciler._start_routers_if_needed(
self.http_host, self.http_port, self.http_middlewares)
# NOTE(edoakes): unfortunately, we can't completely recover from a
# checkpoint in the constructor because we block while waiting for
# other actors to start up, and those actors fetch soft state from
# this actor. Because no other tasks will start executing until after
# the constructor finishes, if we were to run this logic in the
# constructor it could lead to deadlock between this actor and a child.
# However we do need to guarantee that we have fully recovered from a
# checkpoint before any other state-changing calls run. We address this
# by acquiring the write_lock and then posting the task to recover from
# a checkpoint to the event loop. Other state-changing calls acquire
# this lock and will be blocked until recovering from the checkpoint
# finishes.
checkpoint = self.kv_store.get(CHECKPOINT_KEY)
if checkpoint is None:
logger.debug("No checkpoint found")
else:
await self.write_lock.acquire()
asyncio.get_event_loop().create_task(
self._recover_from_checkpoint(checkpoint))
# NOTE(simon): Currently we do all-to-all broadcast. This means
# any listeners will receive notification for all changes. This
# can be problem at scale, e.g. updating a single backend config
# will send over the entire configs. In the future, we should
# optimize the logic to support subscription by key.
self.long_poll_host = LongPollerHost()
self.notify_backend_configs_changed()
self.notify_replica_handles_changed()
self.notify_traffic_policies_changed()
asyncio.get_event_loop().create_task(self.run_control_loop())
def notify_replica_handles_changed(self):
self.long_poll_host.notify_changed(
"worker_handles", {
backend_tag: list(replica_dict.values())
for backend_tag, replica_dict in
self.actor_reconciler.backend_replicas.items()
})
def notify_traffic_policies_changed(self):
self.long_poll_host.notify_changed("traffic_policies",
self.current_state.traffic_policies)
def notify_backend_configs_changed(self):
self.long_poll_host.notify_changed(
"backend_configs", self.current_state.get_backend_configs())
async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]):
"""Proxy long pull client's listen request.
Args:
keys_to_snapshot_ids (Dict[str, int]): Snapshot IDs are used to
determine whether or not the host should immediately return the
data or wait for the value to be changed.
"""
return await (
self.long_poll_host.listen_for_change(keys_to_snapshot_ids))
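    # Illustrative client-side call (hypothetical handle variable):
    #   updates = await controller_handle.listen_for_change.remote(
    #       {"traffic_policies": 3, "backend_configs": 7})
    # The host returns immediately for any key whose current snapshot id
    # differs from the one supplied, and otherwise blocks until that key
    # changes.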
def get_routers(self) -> Dict[str, ActorHandle]:
"""Returns a dictionary of node ID to router actor handles."""
return self.actor_reconciler.routers_cache
def get_router_config(self) -> Dict[str, Tuple[str, List[str]]]:
"""Called by the router on startup to fetch required state."""
return self.current_state.routes
def _checkpoint(self) -> None:
"""Checkpoint internal state and write it to the KV store."""
assert self.write_lock.locked()
logger.debug("Writing checkpoint")
start = time.time()
checkpoint = pickle.dumps(
Checkpoint(self.goal_state, self.current_state,
self.actor_reconciler))
self.kv_store.put(CHECKPOINT_KEY, checkpoint)
logger.debug("Wrote checkpoint in {:.2f}".format(time.time() - start))
if random.random(
) < _CRASH_AFTER_CHECKPOINT_PROBABILITY and self.detached:
logger.warning("Intentionally crashing after checkpoint")
os._exit(0)
async def _recover_from_checkpoint(self, checkpoint_bytes: bytes) -> None:
"""Recover the instance state from the provided checkpoint.
Performs the following operations:
1) Deserializes the internal state from the checkpoint.
2) Pushes the latest configuration to the routers
in case we crashed before updating them.
3) Starts/stops any replicas that are pending creation or
deletion.
NOTE: this requires that self.write_lock is already acquired and will
release it before returning.
"""
assert self.write_lock.locked()
start = time.time()
logger.info("Recovering from checkpoint")
restored_checkpoint: Checkpoint = pickle.loads(checkpoint_bytes)
# Restore SystemState
self.current_state = restored_checkpoint.current_state
# Restore ActorStateReconciler
self.actor_reconciler = restored_checkpoint.reconciler
self.autoscaling_policies = await self.actor_reconciler.\
_recover_from_checkpoint(self.current_state, self)
logger.info(
"Recovered from checkpoint in {:.3f}s".format(time.time() - start))
self.write_lock.release()
async def do_autoscale(self) -> None:
for backend, info in self.current_state.backends.items():
if backend not in self.autoscaling_policies:
continue
new_num_replicas = self.autoscaling_policies[backend].scale(
self.backend_stats[backend], info.backend_config.num_replicas)
if new_num_replicas > 0:
await self.update_backend_config(
backend, BackendConfig(num_replicas=new_num_replicas))
async def run_control_loop(self) -> None:
while True:
await self.do_autoscale()
async with self.write_lock:
self.actor_reconciler._start_routers_if_needed(
self.http_host, self.http_port, self.http_middlewares)
checkpoint_required = self.actor_reconciler.\
_stop_routers_if_needed()
if checkpoint_required:
self._checkpoint()
await asyncio.sleep(CONTROL_LOOP_PERIOD_S)
def get_backend_configs(self) -> Dict[str, BackendConfig]:
"""Fetched by the router on startup."""
return self.current_state.get_backend_configs()
def get_traffic_policies(self) -> Dict[str, TrafficPolicy]:
"""Fetched by the router on startup."""
return self.current_state.traffic_policies
def _list_replicas(self, backend_tag: BackendTag) -> List[ReplicaTag]:
"""Used only for testing."""
return list(self.actor_reconciler.backend_replicas[backend_tag].keys())
def get_traffic_policy(self, endpoint: str) -> TrafficPolicy:
"""Fetched by serve handles."""
return self.current_state.traffic_policies[endpoint]
def get_all_replica_handles(self) -> Dict[str, Dict[str, ActorHandle]]:
"""Fetched by the router on startup."""
return self.actor_reconciler.backend_replicas
def get_all_backends(self) -> Dict[str, BackendConfig]:
"""Returns a dictionary of backend tag to backend config."""
return self.current_state.get_backend_configs()
def get_all_endpoints(self) -> Dict[str, Dict[str, Any]]:
return self.current_state.get_endpoints()
async def _set_traffic(self, endpoint_name: str,
traffic_dict: Dict[str, float]) -> None:
if endpoint_name not in self.current_state.get_endpoints():
raise ValueError("Attempted to assign traffic for an endpoint '{}'"
" that is not registered.".format(endpoint_name))
assert isinstance(traffic_dict,
dict), "Traffic policy must be a dictionary."
for backend in traffic_dict:
if self.current_state.get_backend(backend) is None:
raise ValueError(
"Attempted to assign traffic to a backend '{}' that "
"is not registered.".format(backend))
traffic_policy = TrafficPolicy(traffic_dict)
self.current_state.traffic_policies[endpoint_name] = traffic_policy
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
self.notify_traffic_policies_changed()
async def set_traffic(self, endpoint_name: str,
traffic_dict: Dict[str, float]) -> None:
"""Sets the traffic policy for the specified endpoint."""
async with self.write_lock:
await self._set_traffic(endpoint_name, traffic_dict)
async def shadow_traffic(self, endpoint_name: str, backend_tag: BackendTag,
proportion: float) -> None:
"""Shadow traffic from the endpoint to the backend."""
async with self.write_lock:
if endpoint_name not in self.current_state.get_endpoints():
raise ValueError("Attempted to shadow traffic from an "
"endpoint '{}' that is not registered."
.format(endpoint_name))
if self.current_state.get_backend(backend_tag) is None:
raise ValueError(
"Attempted to shadow traffic to a backend '{}' that "
"is not registered.".format(backend_tag))
self.current_state.traffic_policies[endpoint_name].set_shadow(
backend_tag, proportion)
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
self.notify_traffic_policies_changed()
# TODO(architkulkarni): add Optional for route after cloudpickle upgrade
async def create_endpoint(self, endpoint: str,
traffic_dict: Dict[str, float], route,
methods) -> None:
"""Create a new endpoint with the specified route and methods.
If the route is None, this is a "headless" endpoint that will not
be exposed over HTTP and can only be accessed via a handle.
"""
async with self.write_lock:
# If this is a headless endpoint with no route, key the endpoint
# based on its name.
# TODO(edoakes): we should probably just store routes and endpoints
# separately.
if route is None:
route = endpoint
# TODO(edoakes): move this to client side.
err_prefix = "Cannot create endpoint."
if route in self.current_state.routes:
# Ensures this method is idempotent
if self.current_state.routes[route] == (endpoint, methods):
return
else:
raise ValueError(
"{} Route '{}' is already registered.".format(
err_prefix, route))
if endpoint in self.current_state.get_endpoints():
raise ValueError(
"{} Endpoint '{}' is already registered.".format(
err_prefix, endpoint))
logger.info(
"Registering route '{}' to endpoint '{}' with methods '{}'.".
format(route, endpoint, methods))
self.current_state.routes[route] = (endpoint, methods)
# NOTE(edoakes): checkpoint is written in self._set_traffic.
await self._set_traffic(endpoint, traffic_dict)
await asyncio.gather(*[
router.set_route_table.remote(self.current_state.routes)
for router in self.actor_reconciler.router_handles()
])
async def delete_endpoint(self, endpoint: str) -> None:
"""Delete the specified endpoint.
Does not modify any corresponding backends.
"""
logger.info("Deleting endpoint '{}'".format(endpoint))
async with self.write_lock:
# This method must be idempotent. We should validate that the
# specified endpoint exists on the client.
for route, (route_endpoint,
_) in self.current_state.routes.items():
if route_endpoint == endpoint:
route_to_delete = route
break
else:
logger.info("Endpoint '{}' doesn't exist".format(endpoint))
return
# Remove the routing entry.
del self.current_state.routes[route_to_delete]
# Remove the traffic policy entry if it exists.
if endpoint in self.current_state.traffic_policies:
del self.current_state.traffic_policies[endpoint]
self.actor_reconciler.endpoints_to_remove.append(endpoint)
# NOTE(edoakes): we must write a checkpoint before pushing the
# updates to the routers to avoid inconsistent state if we crash
# after pushing the update.
self._checkpoint()
await asyncio.gather(*[
router.set_route_table.remote(self.current_state.routes)
for router in self.actor_reconciler.router_handles()
])
async def create_backend(self, backend_tag: BackendTag,
backend_config: BackendConfig,
replica_config: ReplicaConfig) -> None:
"""Register a new backend under the specified tag."""
async with self.write_lock:
# Ensures this method is idempotent.
backend_info = self.current_state.get_backend(backend_tag)
if backend_info is not None:
if (backend_info.backend_config == backend_config
and backend_info.replica_config == replica_config):
return
backend_replica = create_backend_replica(
replica_config.func_or_class)
# Save creator that starts replicas, the arguments to be passed in,
# and the configuration for the backends.
self.current_state.add_backend(
backend_tag,
BackendInfo(
worker_class=backend_replica,
backend_config=backend_config,
replica_config=replica_config))
metadata = backend_config.internal_metadata
if metadata.autoscaling_config is not None:
self.autoscaling_policies[
backend_tag] = BasicAutoscalingPolicy(
backend_tag, metadata.autoscaling_config)
try:
self.actor_reconciler._scale_backend_replicas(
self.current_state.backends, backend_tag,
backend_config.num_replicas)
except RayServeException as e:
del self.current_state.backends[backend_tag]
raise e
# NOTE(edoakes): we must write a checkpoint before starting new
# or pushing the updated config to avoid inconsistent state if we
# crash while making the change.
self._checkpoint()
await self.actor_reconciler._start_pending_backend_replicas(
self.current_state)
self.notify_replica_handles_changed()
# Set the backend config inside the router
# (particularly for max_concurrent_queries).
self.notify_backend_configs_changed()
async def delete_backend(self, backend_tag: BackendTag) -> None:
async with self.write_lock:
# This method must be idempotent. We should validate that the
# specified backend exists on the client.
if self.current_state.get_backend(backend_tag) is None:
return
# Check that the specified backend isn't used by any endpoints.
for endpoint, traffic_policy in self.current_state.\
traffic_policies.items():
if (backend_tag in traffic_policy.traffic_dict
or backend_tag in traffic_policy.shadow_dict):
raise ValueError("Backend '{}' is used by endpoint '{}' "
"and cannot be deleted. Please remove "
"the backend from all endpoints and try "
"again.".format(backend_tag, endpoint))
# Scale its replicas down to 0. This will also remove the backend
# from self.current_state.backends and
# self.actor_reconciler.backend_replicas.
self.actor_reconciler._scale_backend_replicas(
self.current_state.backends, backend_tag, 0)
# Remove the backend's metadata.
del self.current_state.backends[backend_tag]
if backend_tag in self.autoscaling_policies:
del self.autoscaling_policies[backend_tag]
# Add the intention to remove the backend from the router.
self.actor_reconciler.backends_to_remove.append(backend_tag)
# NOTE(edoakes): we must write a checkpoint before removing the
# backend from the router to avoid inconsistent state if we crash
# after pushing the update.
self._checkpoint()
await self.actor_reconciler._stop_pending_backend_replicas()
self.notify_replica_handles_changed()
async def update_backend_config(self, backend_tag: BackendTag,
config_options: BackendConfig) -> None:
"""Set the config for the specified backend."""
async with self.write_lock:
assert (self.current_state.get_backend(backend_tag)
), "Backend {} is not registered.".format(backend_tag)
assert isinstance(config_options, BackendConfig)
stored_backend_config = self.current_state.get_backend(
backend_tag).backend_config
backend_config = stored_backend_config.copy(
update=config_options.dict(exclude_unset=True))
backend_config._validate_complete()
self.current_state.get_backend(
backend_tag).backend_config = backend_config
# Scale the replicas with the new configuration.
self.actor_reconciler._scale_backend_replicas(
self.current_state.backends, backend_tag,
backend_config.num_replicas)
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
# Inform the router about change in configuration
# (particularly for setting max_batch_size).
await self.actor_reconciler._start_pending_backend_replicas(
self.current_state)
await self.actor_reconciler._stop_pending_backend_replicas()
self.notify_replica_handles_changed()
self.notify_backend_configs_changed()
def get_backend_config(self, backend_tag: BackendTag) -> BackendConfig:
"""Get the current config for the specified backend."""
assert (self.current_state.get_backend(backend_tag)
), "Backend {} is not registered.".format(backend_tag)
return self.current_state.get_backend(backend_tag).backend_config
async def shutdown(self) -> None:
"""Shuts down the serve instance completely."""
async with self.write_lock:
for router in self.actor_reconciler.router_handles():
ray.kill(router, no_restart=True)
for replica in self.actor_reconciler.get_replica_handles():
ray.kill(replica, no_restart=True)
self.kv_store.delete(CHECKPOINT_KEY)
| [
"[email protected]"
] | |
218f7f161ce570b21a5293386e4ddc9cc7759bd2 | 9b722ca41671eb2cea19bac5126d0920639261bd | /.history/app_20201124112830.py | dfe4f24e89674672c3d491d9d14c2ce2f017531e | [] | no_license | thawalk/db_flask_server | 7928fd481f99d30bdccc60d97f02db78324cfdbe | cd55f1c9bf84c734457ee02d9f64a6833e295fad | refs/heads/master | 2023-01-25T02:40:19.097457 | 2020-12-06T07:45:50 | 2020-12-06T07:45:50 | 314,229,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,465 | py | import json
import pymongo
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
app = Flask(__name__)
test_collection='test_collection'
mongo = pymongo.MongoClient('mongodb://54.83.130.150:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(db, 'test_collection')
db = mysql.connector.connect(
host ='3.84.158.241',
user = 'root',
password = '',
database = 'reviews',
)
cur = db.cursor()
@app.route('/',methods=["GET"])
def api_root():
data = {
'message': 'Welcome to our website. Where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/categories', methods = ['GET']) #TODO: returns list of categories
def get_categories():
categories = []
    js = json.dumps(categories)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/search', methods=['GET']) #now it only searches for TITLE. the mongo metadata does not have author
def search_book():
try:
data = request.json
title = data["title"]
result = metadata_col.find({"title":title})
result_array = dumps(list(result))
print(result_array)
js = json.dumps(result_array)
response = Response(js, status=200, mimetype='application/json')
return response
except:
errMsg = "Please include title."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
try:
data = request.json
title = data['title']
asin = data['asin']
description = data['description']
price = data['price']
categories = data['categories']
message = "Book added successfully"
metadata_col.insert({"title":title,"asin":asin,"description":description,"price":price,"categories":categories})
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
return response
except:
errMsg = "Please include title, asin, description, price and categories."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
@app.route('/addReview',methods = ['POST']) # inserts the review into the sql reviews table
def add_review():
    try:
        data = request.json
        asin = data["asin"]
        helpful = [0,0]
        overall = data["overall"]
        reviewText = data["reviewText"]
        reviewTime = data["reviewTime"]
        reviewerID = data["reviewerID"]
        reviewerName = data["reviewerName"]
        summary = data["summary"]
        unixReviewTime = int(time.time())
        sql = "INSERT INTO kindle_reviews (asin, overall, reviewText, reviewTime, reviewerID, reviewerName, summary, unixReviewTime) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
        cur.execute(sql, (asin, overall, reviewText, reviewTime, reviewerID, reviewerName, summary, unixReviewTime))
        db.commit()
        js = json.dumps("Review added successfully")
        return Response(js, status=201, mimetype='application/json')
    except:
        errMsg = "Please include asin, overall, reviewText, reviewTime, reviewerID, reviewerName and summary."
        js = json.dumps(errMsg)
        return Response(js, status=400, mimetype='application/json')
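# Example request bodies (illustrative data only; the /addReview fields mirror
# the commented-out /review handler above, and unixReviewTime is filled in
# server-side):
#   POST /addBook   {"title": "...", "asin": "...", "description": "...",
#                    "price": 9.99, "categories": ["Fiction"]}
#   POST /addReview {"asin": "...", "overall": 5, "reviewText": "...",
#                    "reviewTime": "11 24, 2020", "reviewerID": "...",
#                    "reviewerName": "...", "summary": "..."}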
@app.route('/sortByGenres', methods= ['GET']) #TODO: sort by genres from mongo metadata categories
def sort_by_genres():
pass
if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80) #remember to change this part
app.run(debug=True)
| [
"[email protected]"
] | |
7bc09e89695184c589a6db756b746e3e9450f047 | ab8ea44704ea1a444e4f68ee740b584288d3e558 | /tests/test_execer.py | f0b0661a8b670a8b6d6093ea78392b65de20eb17 | [
"BSD-2-Clause"
] | permissive | jamiees2/xonsh | 96514c3381ac2fcca872e473ea9d414d74c2fdc9 | f7b5985a88b32fafdaf162012c1ebbd19e48e6b9 | refs/heads/master | 2021-01-18T07:49:58.323584 | 2015-03-11T01:56:42 | 2015-03-11T01:56:42 | 31,949,439 | 0 | 0 | null | 2015-03-10T09:42:21 | 2015-03-10T09:42:21 | null | UTF-8 | Python | false | false | 869 | py | """Tests the xonsh lexer."""
from __future__ import unicode_literals, print_function
import os
import sys
import ast
import nose
from xonsh.execer import Execer
from tools import mock_xonsh_env
DEBUG_LEVEL = 0
EXECER = None
#
# Helpers
#
def setup():
# only setup one parser
global EXECER
EXECER = Execer(debug_level=DEBUG_LEVEL)
def check_exec(input):
with mock_xonsh_env(None):
if not input.endswith('\n'):
input += '\n'
EXECER.debug_level = DEBUG_LEVEL
EXECER.exec(input)
def check_eval(input):
with mock_xonsh_env(None):
EXECER.debug_level = DEBUG_LEVEL
EXECER.eval(input)
#
# Tests
#
def test_bin_ls():
yield check_eval, '/bin/ls -l'
def test_ls_dashl():
yield check_eval, 'ls -l'
def test_which_ls():
yield check_eval, 'which ls'
if __name__ == '__main__':
nose.runmodule()
| [
"[email protected]"
] | |
3d787e6984f3eee88abe60dd5170ec3af6010e22 | c6cd9829966c730e52ba932ff04b05c186c3af99 | /udpserver.py | c14eb6b87daa9bfa2fcbed85a24e70c5792b7053 | [] | no_license | fotopretty/ESP8266Server | ba3b9c980c35edd57a5c759225bfedfdb82c26e6 | aca0baa6762e5230593a1fe3bf1379db89530a78 | refs/heads/master | 2021-05-29T12:19:18.611152 | 2015-09-16T17:03:40 | 2015-09-16T17:03:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | # This is a Python UDP server to display UDP messages sent by the ESP8266 Arduino Shield by http://www.doit.am/
# Listen to UDP port 9000 and print any message received.
# Based on https://pymotw.com/2/socket/udp.html
__author__ = 'Luppy'
import socket
import sys
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to any IP address, port 9000
server_address = ('', 9000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
print >>sys.stderr, '\nwaiting to receive message'
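# To exercise the server from another shell (illustrative, requires netcat):
#   echo -n "hello" | nc -u <server-ip> 9000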
while True:
data, address = sock.recvfrom(4096)
print >>sys.stderr, '----received %s bytes from %s' % (len(data), address)
print >>sys.stderr, data
| [
"[email protected]"
] | |
d9f1f1ef4a21917821be03f6b3eae82be1d88ae0 | 2728543f61eb17dcccca9853ba6e6d2d932c8f8e | /roundsolutions/src/g4f_ws.py | c7273191eeff049503dbb05f0d4afbf69c165e74 | [
"MIT"
] | permissive | bewest/unapy | 7a77afb841e354de5943f4bdfe9a08f1d3f49c88 | cc55cfb90f38c7ac01ef244cc4b3509e4426b0e4 | refs/heads/master | 2016-09-11T06:09:39.908520 | 2012-06-17T23:10:20 | 2012-06-17T23:10:20 | 2,311,697 | 3 | 6 | null | null | null | null | WINDOWS-1252 | Python | false | false | 2,461 | py | ############################################
# gauge4free WS2300 Python application #
# Copyright 2008, © Round Solutions #
# #
############################################
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright
#notice, this list of conditions and the following disclaimer in
#the documentation and/or other materials provided with the distribution.
#
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS``AS
#IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
#TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#version 20080128.1
import MAIN
import LOCALS
# Change values below
# GPRS APN settings
# incorrect values lead to imposibility to use GPRS
apn = 'internet'
gprs_userid = ''
gprs_passw = ''
# gauge4free password
g4f_passw = 'demo'
# Interval between data upload to server
# in 1/10 of second
interval = 18000
# WS2300 driver
# how many times a command will be retried before declaring failure
LOCALS.maxtrials = 30
# receive timeout when reading from WS2300
LOCALS.receive_timeout = 3
'''
Debug level is in LOCALS.debug_level
if bit 2 is set print driver level messages
if bit 1 is set print low level applications messages
if bit 0 is set print high level applications messages
'''
LOCALS.debug_level = 3
# !!! Do not change anything from here !!!
LOCALS.cgdcont = apn
LOCALS.gprsuserid = gprs_userid
LOCALS.gprspassw = gprs_passw
LOCALS.g4fpassw = g4f_passw
LOCALS.interval = interval
MAIN.main()
| [
"[email protected]"
] | |
1968cafb7539e6a61fb1b2c491c4ca2d09e9891d | e6dd376fdb9b511975e90c8ec73972984b4a4895 | /tests/test_numba.py | 772bb541e64c0e1871a8d23e54402a1295d5c67d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | levandoskije/chemicals | 41fd8ad2230a64797953d16937eec61d82050c49 | a5c531c1c1822a816cce9d071eb03092fb311a41 | refs/heads/master | 2023-04-09T19:52:50.317171 | 2021-04-15T02:38:32 | 2021-04-15T02:38:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,414 | py | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from chemicals import *
import chemicals.vectorized
from math import *
from random import random
from fluids.constants import *
from fluids.numerics import assert_close, assert_close1d, assert_close2d
import pytest
try:
import numba
import chemicals.numba
import chemicals.numba_vectorized
except:
numba = None
import numpy as np
def swap_funcs_and_test(names, substitutions, test):
'''
names : list[str]
object names to switch out
substitutions : list[obj]
Objects to put in
test : function
Unit test to run in the file
'''
originals = {}
glob = test.__globals__
for name, sub in zip(names, substitutions):
originals[name] = glob[name]
glob[name] = sub
try:
test()
except Exception as e:
glob.update(originals)
raise e
glob.update(originals)
def mark_as_numba(func):
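    # Apply the 'numba' marker and skip the test when numba failed to import.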
func = pytest.mark.numba(func)
func = pytest.mark.skipif(numba is None, reason="Numba is missing")(func)
return func
@mark_as_numba
def test_return_1d_array():
# Functions which initialize an array, and then need to return the correct value
N = 30
zs = zs_orig = normalize([random() for i in range(N)])
MWs = [random()*200 for i in range(N)]
zs2 = np.array(zs)
MWs2 = np.array(MWs)
# Took the slightest performance hit to CPython only, 186 us original, 190 us revised
# at 1000 elements; no performance difference < 50 compounds
ws = zs_to_ws(zs, MWs)
ws_np = chemicals.numba.zs_to_ws(zs2, MWs2)
assert type(ws_np) is np.ndarray
assert_close1d(ws, ws_np)
zs = ws_to_zs(ws, MWs)
zs_np = chemicals.numba.ws_to_zs(ws_np, MWs2)
assert type(zs_np) is np.ndarray
assert_close1d(zs, zs_np)
# Treat MWs as Vfs; doesn't matter to math
Vfs = zs_to_Vfs(zs, MWs)
Vfs_np = chemicals.numba.zs_to_Vfs(zs2, MWs2)
assert type(Vfs_np) is np.ndarray
assert_close1d(Vfs, Vfs_np)
zs = Vfs_to_zs(Vfs, MWs)
zs_np = chemicals.numba.Vfs_to_zs(Vfs_np, MWs2)
assert type(Vfs_np) is np.ndarray
assert_close1d(zs, zs_np)
# Functions which have a return list comprehension
vals = [-2651.3181821109024, -2085.574403592012, -2295.0860830203587]
dxsn1 = chemicals.dxs_to_dxsn1(vals)
dxsn1_np = chemicals.numba.dxs_to_dxsn1(np.array(vals))
assert_close1d(dxsn1, dxsn1_np)
assert type(dxsn1_np) is np.ndarray
dxs, xs = [-0.0028, -0.00719, -0.00859], [0.7, 0.2, 0.1]
dns = dxs_to_dns(dxs, xs)
dns_np = chemicals.numba.dxs_to_dns(np.array(dxs), np.array(xs))
assert type(dns_np) is np.ndarray
assert_close1d(dns, dns_np)
dns = [0.001459, -0.002939, -0.004334]
dn_partials = dns_to_dn_partials(dns, -0.0016567)
dn_partials_np = chemicals.numba.dns_to_dn_partials(np.array(dns), -0.0016567)
assert type(dn_partials_np) is np.ndarray
assert_close1d(dn_partials_np, dn_partials)
dxs = [-0.0026404, -0.00719, -0.00859]
xs = [0.7, 0.2, 0.1]
F = -0.0016567
dn_partials = dxs_to_dn_partials(dxs, xs, F)
dn_partials_np = chemicals.numba.dxs_to_dn_partials(np.array(dxs), np.array(xs), F)
assert_close1d(dn_partials, dn_partials_np)
assert type(dn_partials_np) is np.ndarray
@mark_as_numba
def test_return_2d_array():
d2xs = [[0.152, 0.08, 0.547], [0.08, 0.674, 0.729], [0.547, 0.729, 0.131]]
xs = [0.7, 0.2, 0.1]
dxdn_partials = d2xs_to_dxdn_partials(d2xs, xs)
a, b = np.array(d2xs), np.array(xs)
dxdn_partials_np = chemicals.numba.d2xs_to_dxdn_partials(a, b)
assert type(dxdn_partials_np) is np.ndarray
assert_close1d(dxdn_partials, dxdn_partials_np)
@mark_as_numba
def test_mixing_simple():
a = np.array([1,2])
b = np.array([.1, .2])
tot = chemicals.numba.mixing_simple(a, b)
assert_close(tot, 0.5, rtol=1e-14)
a = np.array([.1, .9])
b = np.array([.01, .02])
val = chemicals.numba.mixing_logarithmic(a, b)
assert_close(val, 0.01866065983073615, rtol=1e-13)
@mark_as_numba
def test_dippr_correlations():
orders = (0, 1, -1, -1j)
args = (20, 33.19, 66.653, 6765.9, -123.63, 478.27)
for i in orders:
assert_close(chemicals.numba.EQ114(*args, order=i), chemicals.numba.EQ114(*args, order=i), rtol=1e-13)
args = (300, 276370., -2090.1, 8.125, -0.014116, 0.0000093701)
for i in orders:
assert_close(chemicals.numba.EQ100(*args, order=i), chemicals.numba.EQ100(*args, order=i), rtol=1e-13)
# EQ102 - numba-scipy does not support complex numbers so this does not work in numba
args = (300., 647.096, 17.863, 58.606, -95.396, 213.89, -141.26)
for i in orders:
assert_close(chemicals.numba.EQ116(*args, order=i), chemicals.numba.EQ116(*args, order=i), rtol=1e-13)
args = (20., 3.3258E4, 3.6199E4, 1.2057E3, 1.5373E7, 3.2122E3, -1.5318E7, 3.2122E3)
for i in orders:
assert_close(chemicals.numba.EQ127(*args, order=i), chemicals.numba.EQ127(*args, order=i), rtol=1e-13)
args = (300., 33363., 26790., 2610.5, 8896., 1169)
for i in orders:
assert_close(chemicals.numba.EQ107(*args, order=i), chemicals.numba.EQ107(*args, order=i), rtol=1e-13)
args = (300.0, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21)
for i in orders:
assert_close(chemicals.numba.EQ104(*args, order=i), chemicals.numba.EQ104(*args, order=i), rtol=1e-13)
@mark_as_numba
def test_thermal_conductivity_misc():
assert_close(chemicals.numba.Bahadori_liquid(273.15, 170),
Bahadori_liquid(273.15, 170))
assert_close(chemicals.numba.Missenard(304., 6330E5, 591.8, 41E5, 0.129),
chemicals.Missenard(304., 6330E5, 591.8, 41E5, 0.129))
assert_close(chemicals.numba.DIPPR9H(np.array([0.258, 0.742]), np.array([0.1692, 0.1528])),
DIPPR9H([0.258, 0.742], [0.1692, 0.1528]))
assert_close(chemicals.numba.Filippov(np.array([0.258, 0.742]), np.array([0.1692, 0.1528])),
Filippov([0.258, 0.742], [0.1692, 0.1528]))
assert_close(chemicals.numba.DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear'),
chemicals.DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear'))
assert_close(chemicals.numba.Eli_Hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9),
chemicals.Eli_Hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9))
assert_close(chemicals.numba.Eli_Hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=1.721E-4),
chemicals.Eli_Hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=1.721E-4))
assert_close(chemicals.numba.Chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4),
chemicals.Chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4))
# Does not work - atom input
# chemicals.numba.Mersmann_Kind_thermal_conductivity_liquid(400, 170.33484, 658.0, 0.000754, {'C': 12, 'H': 26})
@mark_as_numba
def test_viscosity_misc():
assert_close(chemicals.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6, drho_dP_Tr=3.119177410324e-6),
chemicals.numba.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6, drho_dP_Tr=3.119177410324e-6), rtol=1e-13)
assert_close(chemicals.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6),
chemicals.numba.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6), rtol=1e-13)
assert_close(chemicals.mu_IAPWS(T=647.35, rho=222),
chemicals.numba.mu_IAPWS(T=647.35, rho=222), rtol=1e-13)
# Has a min, if statement
args = (300., 500E5, 572.2, 34.7E5, 0.236, 0, 0.00068)
ans = chemicals.numba.Lucas(*args)
ans_base = chemicals.viscosity.Lucas(*args)
assert_close(ans, ans_base, rtol=1e-14)
# There is a dict lokup but it is not always needed
new = Lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
fast = chemicals.numba.Lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
assert_close(new, fast, rtol=1e-12)
# Test the dict lookup has been turned into a couple if statements - not suitable for large
# tables but for three elements it is just as fast as a dict lookup
kwargs = dict(T=6, Tc=5.1889, Pc=226968.0, Zc=0.3014, MW=4.002602, CASRN='7440-59-7')
assert_close(chemicals.numba.Lucas_gas(**kwargs), Lucas_gas(**kwargs), rtol=1e-14)
# A couple of points with Herning-Sipperer; works fine
zs = np.array([0.5, 0.25, 0.25]*10)
mus = np.array([1.78e-05, 1.12e-05, 9.35e-06]*10)
MWs = np.array([28.0134, 16.043, 30.07]*10)
fast = chemicals.numba.Herning_Zipperer(zs, mus, MWs)
base = chemicals.Herning_Zipperer(zs.tolist(), mus.tolist(), MWs.tolist())
assert_close(fast, base, rtol=1e-14)
# Function calling other functions
n = 1
zs = np.array([.4, .3, .3]*n)
MWs = np.array([16.04246, 30.06904, 44.09562]*n)
Tcs = np.array([190.564, 305.32, 369.83]*n)
Pcs = np.array([4599000.0, 4872000.0, 4248000.0]*n)
Vcs = np.array([9.86e-05, 0.0001455, 0.0002]*n)
mu = chemicals.numba.Lorentz_Bray_Clarke(T=300.0, P=1e6, Vm=0.0023025, zs=zs, MWs=MWs, Tcs=Tcs, Pcs=Pcs, Vcs=Vcs)
assert_close(mu, 9.925488160761484e-06, rtol=1e-14)
# Viscosity index - works beautifully
assert_close(chemicals.numba.viscosity_index(73.3E-6, 8.86E-6, rounding=False),
chemicals.viscosity_index(73.3E-6, 8.86E-6, rounding=False), rtol=1e-14)
assert_close(chemicals.numba.viscosity_index(73.3E-6, 8.86E-6, rounding=True),
chemicals.viscosity_index(73.3E-6, 8.86E-6, rounding=True), rtol=1e-14)
@mark_as_numba
def test_interface_misc():
# Tested quite a bit with numba/PyPy
# At first numba had 3x the speed, but then I made the optimizations by hand
# I knew were possible. Their speed is about equal after, with a slight edge up
# by numba with large arrays
n = 1
xs = np.array([0.1606, 0.8394]*n)
xs /= sum(xs)
sigmas = np.array([0.01547, 0.02877]*n)
rhoms = np.array([8610., 15530.]*n)
xs2, sigmas2, rhoms2 = xs.tolist(), sigmas.tolist(), rhoms.tolist()
assert_close(chemicals.numba.Winterfeld_Scriven_Davis(xs, sigmas, rhoms),
Winterfeld_Scriven_Davis(xs2, sigmas2, rhoms2))
n = 1
xs = np.array([0.1606, 0.8394]*n)
sigmas_Tb = np.array([0.01424, 0.02530]*n)
Tbs = np.array([309.21, 312.95]*n)
Tcs = np.array([469.7, 508.0]*n)
assert_close(chemicals.Diguilio_Teja(T=298.15, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs),
chemicals.numba.Diguilio_Teja(T=298.15, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs), rtol=1e-12)
# Exception is correctly raised with numba
with pytest.raises(ValueError):
chemicals.numba.Diguilio_Teja(T=1000, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs)
@mark_as_numba
def test_virial():
Z = chemicals.numba.Z_from_virial_pressure_form(102919.99946855308, 4.032286555169439e-09, 1.6197059494442215e-13, 6.483855042486911e-19)
assert_close(Z, 1.00283753944, rtol=1e-13)
# # Takes 8 seconds to compile. Fun!
# assert_close(chemicals.numba.BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469),
# chemicals.BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469),
# rtol=1e-13)
@mark_as_numba
def test_phase_change():
# Function had some duplicated powers; numba was optimizing them on me anyway
# Had list-in-list constants being indexed. I thought that would take a lot of time
# but instead removing it only saved 25%, and ~8% in CPython, and zilch in PyPy.
# PyPy takes 19% of the time numba does here, numba has a high overhead.
assert_close(chemicals.numba.MK(553.15, 751.35, 0.302),
chemicals.MK(553.15, 751.35, 0.302), rtol=1e-12)
@mark_as_numba
def test_vapor_pressure():
# PyPy 75 ns, CPython 2470 ns, numba 214 ns
assert_close(chemicals.numba.dPsat_IAPWS_dT(300.),
chemicals.dPsat_IAPWS_dT(300.), rtol=1e-14)
Psats_vec_expect = [34478.367349639906, 33596697.716487624, 109799836.81382856, 179376011.49286702, 234627689.09298804]
Ts = np.linspace(100, 1000, 5)
Psats_calc = chemicals.numba_vectorized.Antoine(Ts, 8.7687, 395.744, -6.469, 10)
assert_close(Psats_calc, Psats_vec_expect, rtol=1e-11)
@mark_as_numba
def test_temperature():
# Note also the last four decimals are different!
# 494 us numba, 388 us PyPy, 1740 us CPython
assert_close(chemicals.numba.ITS90_68_difference(1000.),
chemicals.ITS90_68_difference(1000.0), rtol=1e-12)
# Probably never going to work
# chemicals.numba.T_converter(500, 'ITS-68', 'ITS-48')
@mark_as_numba
def test_critical():
assert_close(chemicals.numba.Li(np.array([0.6449, 0.2359, 0.1192]), np.array([425.12, 469.7, 507.6]),np.array([0.000255, 0.000313, 0.000371])),
Li([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [0.000255, 0.000313, 0.000371]), rtol=1e-13)
assert_close(chemicals.numba.Chueh_Prausnitz_Tc(np.array([0.6449, 0.2359, 0.1192]), np.array([425.12, 469.7, 507.6]),
np.array([0.000255, 0.000313, 0.000371]), np.array([[0, 1.92681, 6.80358],
[1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]])),
Chueh_Prausnitz_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
[0.000255, 0.000313, 0.000371], [[0, 1.92681, 6.80358],
[1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]]), rtol=1e-13)
zs = np.array([0.6449, 0.2359, 0.1192])
Tcs = np.array([425.12, 469.7, 507.6])
Aijs = np.array([[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
assert_close(chemicals.numba.Grieves_Thodos(zs, Tcs, Aijs),
Grieves_Thodos(zs, Tcs, Aijs), rtol=1e-12)
Aijs = np.array([[0, 1.174450, 1.274390], [0.835914, 0, 1.21038], [0.746878, 0.80677, 0]])
assert_close(chemicals.numba.modified_Wilson_Tc(zs, Tcs, Aijs),
modified_Wilson_Tc(zs, Tcs, Aijs), rtol=1e-12)
assert_close(chemicals.numba.Chueh_Prausnitz_Vc(np.array([0.4271, 0.5729]), np.array([0.000273, 0.000256]), np.array([[0, 5.61847], [5.61847, 0]])),
Chueh_Prausnitz_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 5.61847], [5.61847, 0]]), rtol=1e-13)
assert_close(chemicals.numba.modified_Wilson_Vc(np.array([0.4271, 0.5729]), np.array([0.000273, 0.000256]), np.array([[0, 0.6671250], [1.3939900, 0]])),
modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 0.6671250], [1.3939900, 0]]), rtol=1e-13)
# Not working yet: Ihmels, Meissner, Grigoras, critical_surface_methods
# Maybe a future numba update will make this work.
@mark_as_numba
def test_volume():
assert_close(chemicals.numba.Yen_Woods_saturation(300, 647.14, 55.45E-6, 0.245),
chemicals.Yen_Woods_saturation(300, 647.14, 55.45E-6, 0.245))
assert_close(chemicals.numba.COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532),
chemicals.COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532))
assert_close(chemicals.numba.Bhirud_normal(280.0, 469.7, 33.7E5, 0.252),
Bhirud_normal(280.0, 469.7, 33.7E5, 0.252))
assert_close(chemicals.numba.SNM0(121, 150.8, 7.49e-05, -0.004),
SNM0(121, 150.8, 7.49e-05, -0.004))
assert_close(chemicals.numba.SNM0(121, 150.8, 7.49e-05, -0.004, -0.03259620),
SNM0(121, 150.8, 7.49e-05, -0.004, -0.03259620))
kwargs = dict(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, MW=17.03, dipole=None)
assert_close(chemicals.numba.Campbell_Thodos(**kwargs),
Campbell_Thodos(**kwargs))
# Test a slow one
# 81.2 us orig, then 67.6 after optimizations in CPython
# numba: 2.25 µs, PYPY: 1.31; numba with numpy: 4 us
N = 100
xs = [0.4576, 0.5424]*N
MWs = [32.04, 18.01]*N
Tcs = [512.58, 647.29]*N
Pcs = [8.096E6, 2.209E7]*N
Zrs = [0.2332, 0.2374]*N
xs2 = np.array(xs)
MWs2 = np.array(MWs)
Tcs2 = np.array(Tcs)
Pcs2 = np.array(Pcs)
Zrs2 = np.array(Zrs)
orig = Rackett_mixture(T=298., xs=xs, MWs=MWs, Tcs=Tcs, Pcs=Pcs, Zrs=Zrs)
new = chemicals.numba.Rackett_mixture(T=298., xs=xs2, MWs=MWs2, Tcs=Tcs2, Pcs=Pcs2, Zrs=Zrs2)
assert_close(orig, new)
# Test COSTALD_mixture - even slower
# timing after optimization at 200 elements - 1.49 m CPython, 27.1 µs numba, 63.5 µs PyPy3, 71.4 us PyPy2
T = 300.0
N = 15
xs = normalize([0.4576, 0.5424]*N)
Tcs = [512.58, 647.29]*N
Vcs = [0.000117, 5.6e-05]*N
omegas = [0.559,0.344]*N
xs2 = np.array(xs)
Tcs2 = np.array(Tcs)
Vcs2 = np.array(Vcs)
omegas2 = np.array(omegas)
assert_close(COSTALD_mixture(xs, T, Tcs, Vcs, omegas),
chemicals.numba.COSTALD_mixture(xs2, T, Tcs2, Vcs2, omegas2))
@mark_as_numba
def test_solbility():
assert_close(Henry_converter(1.2e-5, old_scale='Hcp', new_scale='SI', rhom=55344.59, MW=18.01528),
chemicals.numba.Henry_converter(1.2e-5, old_scale='Hcp', new_scale='SI', rhom=55344.59, MW=18.01528))
@mark_as_numba
def test_refractivity():
assert_close(brix_to_RI(5.8), chemicals.numba.brix_to_RI(5.8))
@mark_as_numba
def test_rachford_rice():
n = 10
zs = np.array([0.5, 0.3, 0.2]*n)
Ks = np.array([1.685, 0.742, 0.532]*n)
assert_close(chemicals.numba.Rachford_Rice_flash_error(0.5, zs=zs, Ks=Ks),
Rachford_Rice_flash_error(0.5, zs=zs, Ks=Ks))
zs = np.array([0.5, 0.3, 0.2])
Ks = np.array([1.685, 0.742, 0.532])
VF_new, xs_new, ys_new = chemicals.numba.flash_inner_loop(zs=zs, Ks=Ks)
VF, xs, ys = flash_inner_loop(zs=zs.tolist(), Ks=Ks.tolist())
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
@mark_as_numba
def test_Rachford_Rice_solutionN():
ns = [0.204322076984, 0.070970999150, 0.267194323384, 0.296291964579, 0.067046080882, 0.062489248292, 0.031685306730]
Ks_y = [1.23466988745, 0.89727701141, 2.29525708098, 1.58954899888, 0.23349348597, 0.02038108640, 1.40715641002]
Ks_z = [1.52713341421, 0.02456487977, 1.46348240453, 1.16090546194, 0.24166289908, 0.14815282572, 14.3128010831]
ns2, Ks2, betas2 = np.array(ns), np.array([Ks_y, Ks_z]), np.array([.1, .6])
betas_new, zs_new = chemicals.numba.Rachford_Rice_solutionN(ns2, Ks2, betas2)
betas, zs = Rachford_Rice_solutionN(ns, [Ks_y, Ks_z], [.1, .6])
assert_close1d(betas, betas_new, rtol=1e-14)
assert_close2d(zs, zs_new, rtol=1e-14)
@mark_as_numba
def test_Rachford_Rice_solution2():
ns = [0.204322076984, 0.070970999150, 0.267194323384, 0.296291964579, 0.067046080882, 0.062489248292, 0.031685306730]
Ks_y = [1.23466988745, 0.89727701141, 2.29525708098, 1.58954899888, 0.23349348597, 0.02038108640, 1.40715641002]
Ks_z = [1.52713341421, 0.02456487977, 1.46348240453, 1.16090546194, 0.24166289908, 0.14815282572, 14.3128010831]
ns2, Ksy2, Ksz2 = np.array(ns), np.array(Ks_y), np.array(Ks_z)
beta0_new, beta1_new, z0_new, z1_new, z2_new = chemicals.numba.Rachford_Rice_solution2(ns2, Ksy2, Ksz2, beta_y=.1, beta_z=.6)
beta0, beta1, z0, z1, z2 = Rachford_Rice_solution2(ns, Ks_y, Ks_z, beta_y=.1, beta_z=.6)
assert_close(beta0_new, beta0)
assert_close(beta1_new, beta1)
assert_close1d(z0, z0_new)
assert_close1d(z1, z1_new)
assert_close1d(z2, z2_new)
@mark_as_numba
def test_rachford_rice_polynomial():
zs, Ks = [.4, .6], [2, .5]
VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
zs = [0.5, 0.3, 0.2]
Ks = [1.685, 0.742, 0.532]
VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
zs = [0.2, 0.3, 0.4, 0.1]
Ks = [2.5250, 0.7708, 1.0660, 0.2401]
VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
zs = [0.2, 0.3, 0.4, 0.05, 0.05]
Ks = [2.5250, 0.7708, 1.0660, 0.2401, 0.3140]
VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
# 6 and higher use generic routine
zs = [0.05, 0.10, 0.15, 0.30, 0.30, 0.10]
Ks = [6.0934, 2.3714, 1.3924, 1.1418, 0.6457, 0.5563]
VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
assert_close(VF, VF_new)
assert_close1d(xs, xs_new)
assert_close1d(ys, ys_new)
@mark_as_numba
def test_lazy_loading():
    # Numba interferes with to_num
# The data_reader functions are not part of the public API so are not converted
chemicals.numba.heat_capacity.zabransky_dicts
chemicals.numba.heat_capacity.CRC_standard_data
assert 'jitclass' in str(chemicals.numba.heat_capacity.ZabranskySpline)
assert 'jitclass' in str(chemicals.numba.heat_capacity.ZabranskyQuasipolynomial)
assert 'jitclass' in str(chemicals.numba.heat_capacity.zabransky_dict_iso_s['2016-57-1'].models[0])
@mark_as_numba
def test_safety_functions():
import test_safety
swap_funcs_and_test(['NFPA_30_classification'],
[chemicals.numba.NFPA_30_classification],
test_safety.test_NFPA_30_classification)
| [
"[email protected]"
] | |
bba3cbf765243f23c4a7e1d0c54c19cce2b7e9b6 | 08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc | /src/mnistk/networks/conv1dthenlinear_81.py | da2fc2b67de2ce6afe44794e2c90add3e214fc37 | [] | no_license | ahgamut/mnistk | 58dadffad204602d425b18549e9b3d245dbf5486 | 19a661185e6d82996624fc6fcc03de7ad9213eb0 | refs/heads/master | 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 | Python | UTF-8 | Python | false | false | 1,094 | py | # -*- coding: utf-8 -*-
"""
conv1dthenlinear_81.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv1dThenLinear_81(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv1d(in_channels=16, out_channels=41, kernel_size=(30,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=True, padding_mode='zeros')
self.f1 = nn.Conv1d(in_channels=41, out_channels=10, kernel_size=(20,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f2 = nn.Linear(in_features=10, out_features=113, bias=False)
self.f3 = nn.Sigmoid()
self.f4 = nn.Linear(in_features=113, out_features=10, bias=False)
self.f5 = nn.LogSoftmax(dim=1)
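    # Shape trace for batch size B: input viewed as (B, 16, 49); f0 -> (B, 41, 20);
    # f1 -> (B, 10, 1), flattened to (B, 10); f2 -> (B, 113); f4 -> (B, 10)
    # log-probabilities.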
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],16,49)
x = self.f0(x)
x = self.f1(x)
x = x.view(x.shape[0],10)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
return x
| [
"[email protected]"
] | |
8aee48b71c0ebb2d53999918e1c552b0a87ce133 | 72409ee3ffad4d865bfd900ba989a0756ff12e24 | /time_series_detector/algorithm/xgboosting.py | e9ec290e6904b48ed32c1af6aaa202fbf2131f15 | [] | no_license | ncucjm/ts_detector | 559cb5b25932e1a46aac2966fc0b031382080b11 | 742f4026a6da89331b9d6e46ae6ae4e2ea697215 | refs/heads/master | 2020-07-06T01:04:41.299625 | 2019-08-30T15:07:14 | 2019-08-30T15:07:14 | 202,840,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,709 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import xgboost as xgb
from time_series_detector.feature import feature_service
from time_series_detector.common.tsd_errorcode import *
from time_series_detector.common.tsd_common import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../model/')
DEFAULT_MODEL = MODEL_PATH + "xgb_default_model"
class XGBoosting(object):
"""
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient,
flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems
in a fast and accurate way. The same code runs on major distributed environment (Hadoop, SGE, MPI)
and can solve problems beyond billions of examples.
https://github.com/dmlc/xgboost
"""
def __init__(self,
threshold=0.15,
max_depth=10,
eta=0.05,
gamma=0.1,
silent=1,
min_child_weight=1,
subsample=0.8,
colsample_bytree=1,
booster='gbtree',
objective='binary:logistic',
eval_metric='auc'):
"""
        :param threshold: Probability threshold at or above which a sample is classified as normal.
:param max_depth: Maximum tree depth for base learners.
:param eta: Value means model more robust to overfitting but slower to compute.
:param gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree.
:param silent: If 1, it will print information about performance. If 2, some additional information will be printed out.
:param min_child_weight: Minimum sum of instance weight(hessian) needed in a child.
:param subsample: Subsample ratio of the training instance.
:param colsample_bytree: Subsample ratio of columns when constructing each tree.
:param booster: Specify which booster to use: gbtree, gblinear or dart.
:param objective: Specify the learning task and the corresponding learning objective or a custom objective function to be used (see note below).
:param eval_metric: If a str, should be a built-in evaluation metric to use. See doc/parameter.md. If callable, a custom evaluation metric.
"""
self.threshold = threshold
self.max_depth = max_depth
self.eta = eta
self.gamma = gamma
self.silent = silent
self.min_child_weight = min_child_weight
self.subsample = subsample
self.colsample_bytree = colsample_bytree
self.booster = booster
self.objective = objective
self.eval_metric = eval_metric
def __save_libsvm_format(self, data, feature_file_name):
"""
Save the time features to libsvm format.
:param data: feature values
        :param feature_file_name: the file to save the time features and labels to
"""
try:
f = open(feature_file_name, "w")
except Exception as ex:
return TSD_CAL_FEATURE_ERR, str(ex)
times = 0
for temp in data:
if times > 0:
f.write("\n")
result = ['{0}:{1}'.format(int(index) + 1, value) for index, value in enumerate(temp[0])]
f.write(str(temp[1]))
for x in result:
f.write(' ' + x)
times = times + 1
return TSD_OP_SUCCESS, ""
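    # Each written line follows the libsvm convention "<label> 1:<v1> 2:<v2> ...",
    # e.g. "1 1:0.25 2:0.70 3:1.32" for a sample with three features.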
def __calculate_features(self, data, feature_file_name, window=DEFAULT_WINDOW):
"""
        Calculate time features and save them in libsvm format.
        :param data: labeled samples; each item provides "data" (series) and "flag" (label)
        :param feature_file_name: the file to write the libsvm-formatted features to
        :param window: the length of the sliding window
"""
features = []
for index in data:
if is_standard_time_series(index["data"], window):
temp = []
temp.append(feature_service.extract_features(index["data"], window))
temp.append(index["flag"])
features.append(temp)
try:
ret_code, ret_data = self.__save_libsvm_format(features, feature_file_name)
except Exception as ex:
ret_code = TSD_CAL_FEATURE_ERR
ret_data = str(ex)
return ret_code, ret_data
def xgb_train(self, data, task_id, num_round=300):
"""
Train an xgboost model.
:param data: Training dataset.
:param task_id: The id of the training task.
:param num_round: Max number of boosting iterations.
"""
model_name = MODEL_PATH + task_id + "_model"
feature_file_name = MODEL_PATH + task_id + "_features"
ret_code, ret_data = self.__calculate_features(data, feature_file_name)
if ret_code != TSD_OP_SUCCESS:
return ret_code, ret_data
try:
dtrain = xgb.DMatrix(feature_file_name)
except Exception as ex:
return TSD_READ_FEATURE_FAILED, str(ex)
params = {
'max_depth': self.max_depth,
'eta': self.eta,
'gamma': self.gamma,
'silent': self.silent,
'min_child_weight': self.min_child_weight,
'subsample': self.subsample,
'colsample_bytree': self.colsample_bytree,
'booster': self.booster,
'objective': self.objective,
'eval_metric': self.eval_metric,
}
try:
bst = xgb.train(params, dtrain, num_round)
bst.save_model(model_name)
except Exception as ex:
return TSD_TRAIN_ERR, str(ex)
return TSD_OP_SUCCESS, ""
def predict(self, X, window=DEFAULT_WINDOW, model_name=DEFAULT_MODEL):
"""
:param X: the time series to detect of
:type X: pandas.Series
:param window: the length of window
:param model_name: Use a xgboost model to predict a particular sample is an outlier or not.
:return 1 denotes normal, 0 denotes abnormal.
"""
if is_standard_time_series(X, window):
ts_features = []
features = [10]
features.extend(feature_service.extract_features(X, window))
ts_features.append(features)
res_pred = xgb.DMatrix(np.array(ts_features))
bst = xgb.Booster({'nthread': 4})
bst.load_model(model_name)
xgb_ret = bst.predict(res_pred)
if xgb_ret[0] < self.threshold:
value = 0
else:
value = 1
return [value, xgb_ret[0]]
else:
return [0, 0]
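# Illustrative usage (a sketch; assumes the bundled default model file exists and
# `series` has the window length expected by extract_features):
#   detector = XGBoosting()
#   label, score = detector.predict(series)  # label: 1 = normal, 0 = anomalous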
| [
"[email protected]"
] | |
5249901142f31f7c35f886b5c7193b60b5816526 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /kosmos-2/torchscale/examples/fairseq/tasks/data/lm_loader.py | 6be575239088da94981dd64c5f831ab4cd96fc5f | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 13,974 | py | import glob
import os
import torch
import numpy as np
import time
import json
import random
import itertools
import hydra
import copy
from omegaconf import DictConfig, OmegaConf
from infinibatch import iterators
from .basic_loader import BaseBatchGen
from .utils import NativeCheckpointableIterator, WeightIterator, EOL_SYMBOL
from .utils import safe_getattr, safe_hasattr
class LMLoader(BaseBatchGen):
def __init__(
self,
args,
dataset,
dictionary,
tokenizer,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
epoch=1,
num_shards=1,
shard_id=0,
disable_prefetching=False,
data_name='gpt',
):
super().__init__()
self.args = args
self.data = dataset.data
self.data_dir = dataset.data_dir
self.shuffle = dataset.shuffle
self.dictionary = dictionary
self.tokenizer = tokenizer
self.max_tokens = max_tokens
self.max_sentences = max_sentences
self.max_positions = max_positions
self.tokens_per_sample = args.tokens_per_sample
self.mlm_cut_length = safe_getattr(args, "mlm_cut_length", 0)
self.mlm_tokens_proportion = safe_getattr(args, "mlm_tokens_proportion", 0)
self.pad_to_max_len = safe_getattr(args, "pad_to_max_len", False)
self.ignore_invalid_inputs = ignore_invalid_inputs
self.required_batch_size_multiple = required_batch_size_multiple
self.seed = str(seed)
self.epoch = epoch
self.num_shards = num_shards
self.shard_id = shard_id
self.batch_read_ahead = args.batch_read_ahead
self.disable_prefetching = disable_prefetching
self.data_name = data_name
self._setup()
self._build_iter()
def _setup(self):
pass
def _build_iter(self):
tokenized_lines = self._tokenize()
self.padded_batches = self._batchify(tokenized_lines)
if self.disable_prefetching:
prefetch_batches = self.padded_batches
else:
prefetch_batches = iterators.PrefetchIterator(
self.padded_batches,
buffer_size=10000,
buffer_in_main_process=True,
log_empty_buffer_warning=True and self.shard_id == 0,
)
prefetch_batches = iterators.MapIterator(
prefetch_batches, self._move_to_tensor
)
self._iter = prefetch_batches
def _tokenize(self):
'''
data:
{
'source': list[Path],
}
'''
dataset = list(zip(self.data['source']))
if self.shuffle:
chunk_files = \
iterators.InfinitePermutationSourceIterator(
dataset,
seed=self.seed,
shuffle=self.shuffle,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
else:
chunk_files = \
iterators.ChunkedSourceIterator(
dataset,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
tokenized_lines = iterators.SelectManyIterator(chunk_files, lambda files: self._read_from_files(*files))
tokenized_lines = iterators.SamplingRandomMapIterator(tokenized_lines, self._prepare, self.seed)
return tokenized_lines
def getstate(self):
state = super().getstate()
state["epoch"] = self.epoch
state["iterations_in_epoch"] = None
return state
def _batchify(self, lines):
if self.max_sentences is not None:
if self.batch_read_ahead > 0:
lines = iterators.BlockwiseShuffleIterator(lines, self.batch_read_ahead, self.seed)
batches = iterators.FixedBatchIterator(lines, self.max_sentences)
else:
            # Size each batch so that batch_size * max_sequence_length stays
            # within the max_tokens budget, rounded down to a multiple of
            # required_batch_size_multiple.
def dynamic_batch_size(sample):
lengths = [len(x) for x in sample]
batch_size = self.max_tokens // max(lengths) // self.required_batch_size_multiple * self.required_batch_size_multiple
return max(1, batch_size)
batches = iterators.BucketedReadaheadBatchIterator(
lines,
read_ahead=self.batch_read_ahead,
key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,
batch_size=dynamic_batch_size,
shuffle=self.shuffle,
seed=self.seed,
)
def collate(batch):
batch_size = len(batch)
mlm_batch_size = sum([len(x[2]) for x in batch])
gpt_max_length = max([len(x[0]) for x in batch])
if self.pad_to_max_len:
gpt_max_length = self.tokens_per_sample
mlm_max_length = 0
mlm_ntokens = 0
for x in batch:
for y in x[2]:
mlm_max_length = max(mlm_max_length, len(y))
mlm_ntokens += len(y)
gpt_source_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32,
fill_value=self.dictionary.pad())
gpt_target_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32,
fill_value=self.dictionary.pad())
mlm_source_ids = np.full(shape=(mlm_batch_size, mlm_max_length), dtype=np.int32,
fill_value=self.dictionary.pad())
gpt_input_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=0)
gpt_loss_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=1)
mlm_mask_all = np.full(shape=(mlm_batch_size, mlm_max_length), dtype=np.int32, fill_value=0)
mlm_index = 0
for i, (gpt_ids, gpt_input_mask, mlm_ids_list, mlm_mask_list, gpt_loss_mask) in enumerate(batch):
gpt_source_ids[i, :len(gpt_ids)-1] = gpt_ids[:-1]
gpt_target_ids[i, :len(gpt_ids)-1] = gpt_ids[1:]
gpt_input_mask_all[i, :len(gpt_ids)-1] = gpt_input_mask[:-1]
gpt_loss_mask_all[i, :len(gpt_ids)-1] = gpt_loss_mask[1:]
for j, (mlm_ids, mlm_mask) in enumerate(zip(mlm_ids_list, mlm_mask_list)):
mlm_source_ids[mlm_index, :len(mlm_ids)] = mlm_ids
mlm_mask_all[mlm_index, :len(mlm_mask)] = mlm_mask
mlm_index += 1
ret_batch = {
'text':{
'net_input': {
'src_tokens': gpt_source_ids.astype(np.int64),
'mlm_src_tokens': mlm_source_ids.astype(np.int64) if mlm_batch_size !=0 else None,
'gpt_input_mask': gpt_input_mask_all.astype(np.bool_),
'gpt_loss_mask': gpt_loss_mask_all.astype(np.bool_),
'mlm_mask': mlm_mask_all.astype(np.bool_) if mlm_batch_size !=0 else None
},
'target': gpt_target_ids.astype(np.int64),
'nsentences': batch_size,
'ntokens': sum([len(x[0]) for x in batch]),
'mlm_ntokens': mlm_ntokens
}
}
return ret_batch
def collate_for_gpt(batch):
batch_size = len(batch)
gpt_max_length = max([len(x[0]) for x in batch])
if self.pad_to_max_len:
gpt_max_length = self.tokens_per_sample
gpt_source_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32,
fill_value=self.dictionary.pad())
gpt_target_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32,
fill_value=self.dictionary.pad())
gpt_input_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=0)
gpt_loss_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=1)
for i, (gpt_ids, gpt_input_mask, mlm_ids_list, mlm_mask_list, gpt_loss_mask) in enumerate(batch):
gpt_source_ids[i, :len(gpt_ids)-1] = gpt_ids[:-1]
gpt_target_ids[i, :len(gpt_ids)-1] = gpt_ids[1:]
gpt_input_mask_all[i, :len(gpt_ids)-1] = gpt_input_mask[:-1]
gpt_loss_mask_all[i, :len(gpt_ids)-1] = gpt_loss_mask[1:]
ret_batch = {
self.data_name:{
'net_input': {
'src_tokens': gpt_source_ids.astype(np.int64),
},
'target': gpt_target_ids.astype(np.int64),
'nsentences': batch_size,
'ntokens': sum([len(x[0]) for x in batch]),
'mlm_ntokens': 0
}
}
return ret_batch
if self.mlm_tokens_proportion == 0:
padded_batches = iterators.MapIterator(
batches, collate_for_gpt
)
else:
padded_batches = iterators.MapIterator(
batches, collate
)
return padded_batches
def _prepare(self, _random, doc):
mlm_tokens, mlm_mask, gpt_input_mask, gpt_loss_mask = self._mlm_cut(_random, doc)
full_tokens = self._gpt(doc)
return full_tokens, gpt_input_mask, mlm_tokens, mlm_mask, gpt_loss_mask
def _mlm_cut(self, _random, doc):
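        # Select random, non-overlapping spans of roughly mlm_cut_length tokens
        # (never crossing an EOL/document boundary) until ~mlm_tokens_proportion
        # of the tokens are covered. Span tokens are routed to the MLM encoder
        # (gpt_input_mask = 1) and excluded from the LM loss except at each
        # span's final position (gpt_loss_mask = 0 elsewhere in the span).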
eod_index = self.dictionary.indices[EOL_SYMBOL]
if self.mlm_tokens_proportion == 0:
mlm_tokens = []
mlm_mask = []
gpt_input_mask = [0] * len(doc)
gpt_loss_mask = [1] * len(doc)
return mlm_tokens, mlm_mask, gpt_input_mask, gpt_loss_mask
cut_start = np.arange(1, len(doc)-3/2*self.mlm_cut_length, self.mlm_cut_length, dtype=int)
_random.shuffle(cut_start)
mlm_tokens = []
mlm_mask = []
start_list = []
gpt_input_mask = np.zeros(len(doc), dtype=int)
gpt_loss_mask = np.ones(len(doc), dtype=int)
mlm_tokens_total_num = (len(doc)-1) * self.mlm_tokens_proportion
mlm_tokens_cur_num = 0
for start in cut_start:
eod_num = doc[start:start+self.mlm_cut_length].count(eod_index)
if eod_num >= 2:
continue
elif eod_num == 1:
eod_pos = doc[start:start+self.mlm_cut_length].index(eod_index)
if self.mlm_cut_length - eod_pos < 20:
continue
start_ind, end_ind = start+eod_pos+1, start + self.mlm_cut_length
else:
cut_pos = _random.randint(0, self.mlm_cut_length-1)
if cut_pos >= self.mlm_cut_length/2:
start_ind, end_ind = start, start + cut_pos + 1
else:
start_ind, end_ind = start + cut_pos, start + self.mlm_cut_length
assert eod_index not in doc[start_ind:end_ind]
start_list.append(start)
mlm_tokens.append([self.dictionary.bos()] + doc[start_ind:end_ind])
mlm_tokens_cur_num += end_ind - start_ind
mlm_mask.append([0] + [1]*(end_ind - start_ind))
gpt_input_mask[start_ind:end_ind] = 1
gpt_loss_mask[start_ind:end_ind-1] = 0
if mlm_tokens_cur_num > mlm_tokens_total_num:
break
ind = np.array(start_list).argsort()
start_list = np.array(start_list)[ind]
mlm_tokens = np.array(mlm_tokens, dtype=object)[ind]
mlm_mask = np.array(mlm_mask, dtype=object)[ind]
return mlm_tokens, mlm_mask, gpt_input_mask, gpt_loss_mask
def _gpt(self, doc):
return doc
def _read_from_files(self, source_file):
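        # Read one shard: each line is JSON with a "text" field; the text is split
        # on newlines, tokenized, and greedily packed into documents of at most
        # tokens_per_sample tokens, each starting with <bos>. Overlong sentences
        # are dropped, as are documents too short for the MLM cut.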
data = []
file_path = os.path.join(self.data_dir, source_file)
if not os.path.exists(file_path):
print('| file {} not exists'.format(file_path), flush=True)
return iter([]) # skip bad file
with open(file_path, 'r', encoding='utf8') as f:
lines = f.read().strip().split('\n')
gpt_format_text = []
for line in lines:
gpt_format_text.extend(list(filter(None, json.loads(line)["text"].split("\n"))))
gpt_format_text.append('')
tokenized_lines = [self.tokenizer.encode(line) for line in gpt_format_text]
tokenized_ids = [self.dictionary.encode_line(line, add_if_not_exist=False) for line in tokenized_lines]
doc = [self.dictionary.bos()]
for ids in tokenized_ids:
if len(ids) > self.tokens_per_sample: # drop too long sentence
continue
if len(doc) + len(ids) > self.tokens_per_sample:
if len(doc) > 5/2*self.mlm_cut_length + 1:
data.append(doc)
doc = [self.dictionary.bos()]
doc.extend(ids)
if len(doc) > 1 and len(doc) <= self.tokens_per_sample:
if len(doc) > 5/2*self.mlm_cut_length + 1:
data.append(doc)
return data | [
"[email protected]"
] | |
db8d15fe436a1605c48b2d2a6915384b202132f1 | c44a3227d1c2b3a892a9a52438a324e675485ff7 | /odp/ui/admin/views/providers.py | 0b97bea6fff0e6ea171b7e750a6c02b9312ef3de | [
"MIT"
] | permissive | SAEONData/Open-Data-Platform | 4b87aece6a83befd82a67f97d4ae330380c1f947 | 50c52bf476fd5c82afdf44379805f8790bb20319 | refs/heads/main | 2022-11-07T00:30:38.697706 | 2022-11-04T15:09:37 | 2022-11-04T15:09:37 | 251,641,495 | 2 | 1 | MIT | 2022-09-20T12:35:56 | 2020-03-31T15:12:19 | Python | UTF-8 | Python | false | false | 2,215 | py | from flask import Blueprint, flash, redirect, render_template, request, url_for
from odp.ui.admin.forms import ProviderForm
from odplib.const import ODPScope
from odplib.ui import api
bp = Blueprint('providers', __name__)
@bp.route('/')
@api.client(ODPScope.PROVIDER_READ)
def index():
page = request.args.get('page', 1)
providers = api.get(f'/provider/?page={page}')
return render_template('provider_list.html', providers=providers)
@bp.route('/<id>')
@api.client(ODPScope.PROVIDER_READ)
def view(id):
provider = api.get(f'/provider/{id}')
return render_template('provider_view.html', provider=provider)
@bp.route('/new', methods=('GET', 'POST'))
@api.client(ODPScope.PROVIDER_ADMIN)
def create():
form = ProviderForm(request.form)
if request.method == 'POST' and form.validate():
try:
api.post('/provider/', dict(
id=(id := form.id.data),
name=form.name.data,
))
flash(f'Provider {id} has been created.', category='success')
return redirect(url_for('.view', id=id))
except api.ODPAPIError as e:
if response := api.handle_error(e):
return response
return render_template('provider_edit.html', form=form)
@bp.route('/<id>/edit', methods=('GET', 'POST'))
@api.client(ODPScope.PROVIDER_ADMIN)
def edit(id):
provider = api.get(f'/provider/{id}')
form = ProviderForm(request.form, data=provider)
if request.method == 'POST' and form.validate():
try:
api.put('/provider/', dict(
id=id,
name=form.name.data,
))
flash(f'Provider {id} has been updated.', category='success')
return redirect(url_for('.view', id=id))
except api.ODPAPIError as e:
if response := api.handle_error(e):
return response
return render_template('provider_edit.html', provider=provider, form=form)
@bp.route('/<id>/delete', methods=('POST',))
@api.client(ODPScope.PROVIDER_ADMIN)
def delete(id):
api.delete(f'/provider/{id}')
flash(f'Provider {id} has been deleted.', category='success')
return redirect(url_for('.index'))
| [
"[email protected]"
] | |
038fbd532f9fd4dbb174c02e9e979f5807987c8e | 9cbe84017abd74dd4863c60c3438420aeaa4cb5b | /OcCo_Torch/models/pointnet_util.py | 223edb2e5970e591f491deb0d0fde065371aadb5 | [
"MIT"
] | permissive | zebrajack/OcCo | 3c7be8e4c46b61e0899c533c5c101dad56127a3f | c218a2bb446f91702cf8fa6f56bb3a1da406009f | refs/heads/master | 2023-04-30T08:15:48.189980 | 2020-12-29T10:49:21 | 2020-12-29T10:49:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,576 | py | # Copyright (c) 2020. Hanchen Wang, [email protected]
# Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py
import torch, torch.nn as nn, numpy as np, torch.nn.functional as F
from torch.autograd import Variable
def feature_transform_regularizer(trans):
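    # PointNet's orthogonality regularizer: mean Frobenius norm of (A @ A^T - I)
    # over the batch, pushing each predicted feature transform A toward a rotation.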
d = trans.size()[1]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
    loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))  # A @ A^T - I, not A @ (A^T - I)
return loss
# STN -> Spatial Transformer Network
class STN3d(nn.Module):
def __init__(self, channel):
super(STN3d, self).__init__()
self.conv1 = nn.Conv1d(channel, 64, 1) # in-channel, out-channel, kernel size
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0] # global descriptors
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(3).flatten().astype(np.float32))).view(1, 9).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = nn.Conv1d(k, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0]
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(
1, self.k ** 2).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetEncoder(nn.Module):
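    # Per-point MLPs (1x1 Conv1d) followed by a max-pool over points yield a
    # 1024-d global descriptor; with global_feat=False the global vector is
    # concatenated back onto the 64-d per-point features for dense prediction.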
def __init__(self, global_feat=True, feature_transform=False,
channel=3, detailed=False):
        # channel may exceed 3 when each point carries extra features (e.g. normals)
super(PointNetEncoder, self).__init__()
self.stn = STN3d(channel) # Batch * 3 * 3
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
self.detailed = detailed
def forward(self, x):
_, D, N = x.size() # Batch Size, Dimension of Point Features, Num of Points
trans = self.stn(x)
x = x.transpose(2, 1)
if D > 3:
# pdb.set_trace()
x, feature = x.split([3, D-3], dim=2)
x = torch.bmm(x, trans)
# feature = torch.bmm(feature, trans) # feature -> normals
if D > 3:
x = torch.cat([x, feature], dim=2)
x = x.transpose(2, 1)
out1 = self.bn1(self.conv1(x))
x = F.relu(out1)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
out2 = self.bn2(self.conv2(x))
x = F.relu(out2)
out3 = self.bn3(self.conv3(x))
# x = self.bn3(self.conv3(x))
x = torch.max(out3, 2, keepdim=False)[0]
if self.global_feat:
return x, trans, trans_feat
elif self.detailed:
return out1, out2, out3, x
else: # concatenate global and local feature together
x = x.view(-1, 1024, 1).repeat(1, 1, N)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetPartSegEncoder(nn.Module):
def __init__(self, feature_transform=True, channel=3):
super(PointNetPartSegEncoder, self).__init__()
self.stn = STN3d(channel)
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 128, 1)
self.conv4 = nn.Conv1d(128, 512, 1)
self.conv5 = nn.Conv1d(512, 2048, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(2048)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=128)
def forward(self, point_cloud, label):
B, D, N = point_cloud.size()
trans = self.stn(point_cloud)
point_cloud = point_cloud.transpose(2, 1)
if D > 3:
point_cloud, feature = point_cloud.split(3, dim=2)
point_cloud = torch.bmm(point_cloud, trans)
if D > 3:
point_cloud = torch.cat([point_cloud, feature], dim=2)
point_cloud = point_cloud.transpose(2, 1)
out1 = F.relu(self.bn1(self.conv1(point_cloud)))
out2 = F.relu(self.bn2(self.conv2(out1)))
out3 = F.relu(self.bn3(self.conv3(out2)))
if self.feature_transform:
trans_feat = self.fstn(out3)
net_transformed = torch.bmm(out3.transpose(2, 1), trans_feat)
out3 = net_transformed.transpose(2, 1)
out4 = F.relu(self.bn4(self.conv4(out3)))
out5 = self.bn5(self.conv5(out4))
out_max = torch.max(out5, 2, keepdim=False)[0]
out_max = torch.cat([out_max, label.squeeze(1)], 1)
expand = out_max.view(-1, 2048 + 16, 1).repeat(1, 1, N)
concat = torch.cat([expand, out1, out2, out3, out4, out5], 1)
if self.feature_transform:
return concat, trans_feat
return concat
class encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=True, channel=num_channel)
def forward(self, x):
feat, _, _ = self.feat(x)
return feat
class detailed_encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(detailed_encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=False,
channel=num_channel,
detailed=True)
def forward(self, x):
out1, out2, out3, x = self.feat(x)
return out1, out2, out3, x | [
"[email protected]"
] | |
51a5067c854b3664f8ea3cae774a82ffda609903 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoBTag/PerformanceDB/python/PoolBTagPerformanceDBMC36X.py | 51f9ad82857168c4520d2aadbf7b5b494f03b156 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 112 | py | from RecoBTag.PerformanceDB.measure.Pool_pf36 import *
from RecoBTag.PerformanceDB.measure.Pool_calo36 import *
| [
"[email protected]"
] | |
9e19072ff7971bc211783e2524be3902ccd8e5c3 | 39b8dddb1bda5e8055c661da060a9c71040c0ae3 | /reinforcement/tensorflow/minigo/tests/test_shipname.py | 93a9e7848d426fc5cb67bf8191c89a4eecd8e1c1 | [
"Apache-2.0"
] | permissive | dagarcia-nvidia/mlperf_training | 22e7c120bce338ec84b008b5cd64a3e53c2362e3 | bad6f14e6f5a119bfffb3181a8a742874c441753 | refs/heads/master | 2022-12-11T03:28:22.641969 | 2019-02-27T19:05:59 | 2019-02-27T19:05:59 | 172,770,644 | 1 | 1 | Apache-2.0 | 2022-12-08T02:29:51 | 2019-02-26T18:54:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,119 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import shipname
class TestShipname(unittest.TestCase):
def test_bootstrap_gen(self):
name = shipname.generate(0)
self.assertIn('bootstrap', name)
def test_detect_name(self):
string = '000017-model.index'
detected_name = shipname.detect_model_name(string)
self.assertEqual(detected_name, '000017-model')
def test_detect_num(self):
string = '000017-model.index'
detected_name = shipname.detect_model_num(string)
self.assertEqual(detected_name, 17)
| [
"[email protected]"
] | |
ddabea7784ef8342f76c1ca6530fde0cfab7b4f2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3378.py | d9230c40d82e40ec840c98885d3db4d3250b8334 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 09 06:47:06 2017
@author: rajbhagat
For Code Jam - Faster Tidy numbers
"""
readfileopen=open("C:/Users/rajbh/Desktop/B-large.in",'r')
writefileout=open("C:/Users/rajbh/Desktop/B-large.out",'w')
caseno=0
for e in readfileopen:
if caseno>0:
checkno=int(e.strip().rstrip())
ch=str(e.strip().rstrip())
ls=list(ch)
startno=0
digiter=9
noofdigits=len(ls)
while startno<noofdigits:
j=startno
while j<noofdigits:
ls[j]=digiter
j+=1
createdno=int("".join(str(x) for x in ls))
ls=list(str(createdno))
if createdno<=checkno:
startno+=1
digiter=9
elif digiter!=1:
digiter-=1
else:
noofdigits-=1
startno=0
digiter=9
ls=ls[1:]
outstring="Case #"+str(caseno)+": "+str(createdno)+"\n"
writefileout.write(outstring)
caseno+=1
readfileopen.close()
writefileout.close() | [
"[email protected]"
] | |
9eada00291e92ba1f68d9cc92d349c53d4607a32 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/python2.7/test/badsyntax_future7.py | 016f61f770519913d07d97611d89fa2688ab4a4f | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | /home/action/.parts/packages/python2/2.7.6/lib/python2.7/test/badsyntax_future7.py | [
"[email protected]"
] | |
daf27f71cbc15575ee65fd0f02661c46889e6984 | 2efda4e99b5b9da5041d4984b71a2121561a29d3 | /EwhaEverytimeEverywhere/board/views.py | 4acb5691431bf2776659eac49a6cf82d7009ef6f | [] | no_license | yunaisme/Cyber_Graduation_Project | 2ff31284ced20688cad9e4546fad2d3af2217cdf | 5388fe8a3dce0c6053ff00522c50390e8a6160b1 | refs/heads/main | 2023-07-30T12:50:15.754026 | 2021-09-26T14:20:19 | 2021-09-26T14:20:19 | 397,037,621 | 0 | 0 | null | 2021-08-17T01:04:28 | 2021-08-17T01:04:27 | null | UTF-8 | Python | false | false | 2,572 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from .forms import PostForm, CommentForm
from .models import Post, Comment
@login_required(login_url='login')
def post_list(request):
posts = Post.objects.all().order_by('-created_at')
return render(request, 'board/post_list.html',
{'posts': posts})
@login_required(login_url='login')
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
comments = Comment.objects.filter(post_id=pk).order_by('-comment_created')
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment_form = form.save(commit=False)
comment_form.post = post
comment_form.comment_writer = request.user
comment_form.save()
return redirect('board:post_detail', pk=post.pk)
else:
form = CommentForm()
context = {
'post': post,
'comments': comments,
'comment_form': form
}
return render(request, 'board/post_detail.html', context)
@login_required(login_url='login')
def post_upload(request):
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('board:post_list')
else:
form = PostForm()
return render(request, 'board/post_upload.html', {
'form': form,
})
@login_required(login_url='login')
def post_edit(request, pk):
item = get_object_or_404(Post, pk=pk)
    # TODO: should verify that the post belongs to the requesting user
if request.method == 'POST':
        form = PostForm(request.POST, instance=item)
if form.is_valid():
item = form.save()
messages.success(request, '포스트를 수정했습니다.')
return redirect(item)
else:
form = PostForm(instance=item)
return render(request, 'board/post_edit.html', {
'form': form,
})
@login_required(login_url='login')
def post_delete(request, pk):
post = Post.objects.get(pk=pk)
if request.method == 'POST':
        # TODO: should verify that the post belongs to the requesting user
post.delete()
messages.success(request, '포스팅을 삭제했습니다.')
return redirect('board:post_list')
| [
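# A minimal urls.py sketch matching the 'board:...' route names used above.
# The URL patterns themselves are assumptions; only the view and route names
# come from this module.
#
# from django.urls import path
# from . import views
#
# app_name = 'board'
# urlpatterns = [
#     path('', views.post_list, name='post_list'),
#     path('<int:pk>/', views.post_detail, name='post_detail'),
#     path('upload/', views.post_upload, name='post_upload'),
#     path('<int:pk>/edit/', views.post_edit, name='post_edit'),
#     path('<int:pk>/delete/', views.post_delete, name='post_delete'),
# ]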
"[email protected]"
] | |
8bce87db52839bfb325e37a18ea9b5a477384736 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_0/Python/sachinr20/b.py | 1805d5abd977f454eca786173f8e9a14c75ab1cd | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import sys
def flip(st, count):
#if count==len(st):
# return st[::-1]
l = count
st2 = ""
for i in range(count):
if st[i]=='+':
st2 = st2 + "-"
else:
st2 = st2 + "+"
#print("st2new:"+st2)
st2 = st2[::-1]
return st2+st[count:]
def handleit(line, count):
#print("Handling "+line + " len:"+ str(len(line)))
chars = [x for x in line]
if len(line)<=0:
return count;
if len(line) == 1:
if chars[0]=='+':
return count;
else:
count = count + 1
return count
total = len(line)
if line[total-1] == '+':
return handleit(line[:-1], count)
else:
pluses = 0
for ch in chars:
if ch != '+':
break
pluses += 1
if pluses == 0:
line = flip(line, len(line))
count +=1
else:
line = flip(line, pluses)
line = flip(line, len(line))
count += 2
return handleit(line[:len(line)-pluses], count)
name = sys.argv[1]
with open(name) as f:
lines = f.readlines()
lines = lines[1:]
case = 0
with open("out", "w") as o:
for line in lines:
case += 1
line = line.strip()
count = 0
c = handleit(line, count)
op = "Case #"+str(case)+": "+str(c)+"\n"
print(op, end="")
o.write(op)
| [
"[email protected]"
] | |
95502ab595a584f1de7be5054524ea97671baa2f | dcb984494ae1ae88192f0ba685f76a0dbc73dcdb | /venv/lib/python3.6/bisect.py | 1f16dfa58a74cdbde010e65540d31c009ef9d94d | [] | no_license | hornLK/Django_LKproject | 55393a7d92e5a4441df309c5d7c898c1e91e248f | c0ce7a524f2fc4c37b79deaab06c4abc08de7398 | refs/heads/master | 2021-04-30T14:52:36.223985 | 2018-03-30T04:04:12 | 2018-03-30T04:04:12 | 121,226,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /usr/local/python36/lib/python3.6/bisect.py | [
"[email protected]"
] | |
97a9944271b7d91b192683ba190c0287a2a545fd | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY001~099/DAY05-BOJ1260-DFS와BFS/joohyuk.py | db9bbd3aa34e66679027eda6a4ef5d38dca52708 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | import sys
from collections import deque
sys.setrecursionlimit(10 ** 5)  # dfs may recurse up to n (1000) levels deep
si = sys.stdin.readline
graph_unsorted = [set() for _ in range(1001)]
graph = [[]for _ in range(1001)]
visited = [False for _ in range(1001)]
def dfs(s):
print(s, end=' ')
for e in graph[s]:
if not visited[e]:
visited[e] = True
dfs(e)
def main():
n, m, v = [int(e) for e in si().split()]
while m:
m -= 1
a, b = [int(e) for e in si().split()]
graph_unsorted[a].add(b)
graph_unsorted[b].add(a)
for i in range(1, n+1):
e = list(graph_unsorted[i])
e.sort()
graph[i] = e
visited[v] = True
dfs(v)
print()
q = deque()
for i in range(1, n+1):
visited[i] = False
q.append(v)
visited[v] = True
while q:
curr = q.popleft()
print(curr, end=' ')
for e in graph[curr]:
if not visited[e]:
visited[e] = True
q.append(e)
print()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
52333cc2e65db038cbb4d42924cde56aee596bdb | a290925e8c3103bb84327f6f38f0b4ffd7945c1d | /dataugmentation/reverse_para_order.py | 46b542bb4e9b7a02170208992335f7e00154d9dd | [] | no_license | enterpriseih/lightningHotpotQA | 6db502747b2b7a876e7f32743b839c65f851ee49 | b3a992f27a1c2b7881e6ab0c16132c20fb880f8d | refs/heads/master | 2023-08-24T05:38:32.419496 | 2021-05-27T01:09:29 | 2021-05-27T01:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | import json
import sys
from tqdm import tqdm
assert len(sys.argv) == 4
raw_data = json.load(open(sys.argv[1], 'r'))
para_file = sys.argv[2]
with open(para_file, 'r', encoding='utf-8') as reader:
para_data = json.load(reader)
#################################
reverse_output_file = sys.argv[3]
################################
selected_para_dict_reverse = {}
################################
for case in tqdm(raw_data):
guid = case['_id']
##############################################
ir_selected_paras = para_data[guid]
selected_para_dict_reverse[guid] = []
assert len(ir_selected_paras) == 3
if len(ir_selected_paras[0]) == 2:
reverse_ir_paras_1st = [ir_selected_paras[0][1], ir_selected_paras[0][0]]
else:
reverse_ir_paras_1st = ir_selected_paras[0]
selected_para_dict_reverse[guid].append(reverse_ir_paras_1st)
selected_para_dict_reverse[guid].append(ir_selected_paras[1])
if len(ir_selected_paras[2]) == 2:
reverse_ir_paras_3rd = [ir_selected_paras[2][1], ir_selected_paras[2][0]]
else:
reverse_ir_paras_3rd = ir_selected_paras[2]
selected_para_dict_reverse[guid].append(reverse_ir_paras_3rd)
json.dump(selected_para_dict_reverse, open(reverse_output_file, 'w')) | [
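# Illustrative shape of the selected-paragraph file this script expects
# (key names assumed): {"<question_id>": [[pA, pB], [pC, pD], [pE]], ...}
# Each id maps to exactly three groups; the 1st and 3rd groups are written
# out with their two paragraphs swapped whenever they contain a pair.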
"[email protected]"
] | |
93f40d4918907f15aad52856cb8a80bb9202195c | e6252e7ad0e024cd20e0e0779347945b735dd64a | /myenv/restdemo.py | 2c0d9453c08607d15e642c47b4412ccd350d5fee | [] | no_license | Icode4passion/FlaskApp_RestDemo_Calculator_WeightConvertor | 97391a9c7ed1f2b6eab402169f52ac17e4e49c64 | 8865d0d98c070331e3ebcd70ecd5b7ad2dd9c2e2 | refs/heads/master | 2020-04-11T07:33:25.152968 | 2018-12-13T14:33:29 | 2018-12-13T14:33:29 | 161,614,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | from flask import Flask , jsonify , make_response , abort, render_template , request
app = Flask(__name__)
movies = [
{
'id' : 1,
'title': 'Batman',
'Author': 'Bob Kane',
'Director' : 'Christopher'
},
{
'id' : 2,
'title': 'Superman',
'Author': 'Jerry Siegel',
'Director' : 'Richard Donner'
}]
@app.route('/movie/api/v1.0/movies',methods=['GET'])
def get_tasks():
return jsonify({'movies':movies})
@app.route('/movie/api/v1.0/movies/<int:movie_id>',methods=['GET'])
def get_tasks_id(movie_id):
movie = [movie for movie in movies if movie['id'] == movie_id]
if len(movie) == 0 :
abort(400)
return jsonify({'movie': movie[0]})
@app.errorhandler(400)
def errorhandler(error):
return make_response(jsonify({'error':'Not Found'}),404)
#return render_template('home.html')
@app.route('/movie/api/v1.0/movies',methods=['POST'])
def create_tasks():
if not request.json or not 'title' in request.json:
abort(400)
    movie = {
'id' : movies[-1]['id'] +1,
'title' : request.json['title'],
'Author' : request.json.get('Author', ''),
'Director' : request.json.get('Director', ''),
}
movies.append(movie)
    return jsonify({'movie': movie}), 201
if __name__ == '__main__':
app.run(debug = True)
| [
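# Example requests (illustrative; assumes the default Flask dev server):
#   curl http://127.0.0.1:5000/movie/api/v1.0/movies
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"title": "Aquaman", "Author": "Paul Norris", "Director": "James Wan"}' \
#        http://127.0.0.1:5000/movie/api/v1.0/movies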
"[email protected]"
] | |
ce15a2c788b6fc97e976ebdd0a17dcdda74f20b8 | 67b440e37a6a613a9bb11f47fee1e0cf9531001b | /scripts/dict/amber_model_building_water.py | 167d4436fd4089249adf54e759f433ac3b4eeb20 | [
"WTFPL"
] | permissive | 09alpha/amber-in-the-dark | 96d8b93136ce749161b7c4ae2942e1feb95dd8c6 | 5183737ef71e87ebc9dd2d2ea729c928052310e7 | refs/heads/master | 2020-06-12T18:58:48.708114 | 2019-06-28T01:50:48 | 2019-06-28T01:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,729 | py | #----
# Water supply (value corrections):
metadict_model['-Городское водоснабжение (литр)'] = {
'-Городское водоснабжение (кубометр)':1 / 1000,
}
metadict_model['-Сельское водоснабжение (литр)'] = {
'-Сельское водоснабжение (кубометр)':1 / 1000,
}
metadict_model['-Городское водоснабжение (кубометр)'] = {
'Городское водоснабжение (кубометр/сутки)':1 / 360,
'Вода водопроводная (кубометр)':1,
}
metadict_model['-Сельское водоснабжение (кубометр)'] = {
'Сельское водоснабжение (кубометр/сутки)':1 / 360,
'Вода колодезная (кубометр)':1,
}
#----
# Rural water supply (wells):
metadict_model['Сельское водоснабжение (кубометр/сутки)'] = {
    # Yield 0.5-3 cubic metres/hour (42 cubic metres/day)
    # Utilised at 15%
'Шахтный колодец (эксплуатация)':1 / 42 / 0.15,
}
metadict_model['Шахтный колодец (эксплуатация)'] = {
'Отделка шахтного колодца':1,
'Обслуживание шахтных колодцев':1,
'Строительство шахтных колодцев':1 / 30,
}
metadict_model['Обслуживание шахтных колодцев'] = {
    # FIXME
    # Repair and cleaning of the filter.
}
metadict_model['Строительство шахтных колодцев'] = {
'Шахтный колодец (смета)':1,
}
#----
# Городской водопровод:
metadict_model['Городское водоснабжение (кубометр/сутки)'] = {
# Водопровод рассчитан на очистку 3500 кубометров/сутки
# При этом используется на 75%
'Городской водопровод (эксплуатация)':1 / 3500 / 0.75,
}
metadict_model['Городской водопровод (эксплуатация)'] = {
'Обслуживание городского водопровода':1,
'Строительство городского водопровода':1 / 30,
}
metadict_model['Обслуживание городского водопровода'] = {
    # FIXME
    # Move the waterworks equipment here.
    # The filters are cleaned and topped up every few months.
    # https://ru.wikisource.org/wiki/ЭСБЕ/Снабжение_городов_водой
    # https://ru.wikisource.org/wiki/ЭСБЕ/Фильтр
    # In the Petrograd waterworks of the early 1920s, 1000 buckets of water took the workers 15 minutes
    # http://istmat.info/node/27934
    # 1000 buckets is 12.3 tonnes of water, i.e. 49.2 tonnes/hour of work (0.02 norm-hours/tonne)
    # 3500 tonnes of water a day, 360 days a year.
'_-Работа водопроводчика (нормо-часов)':3500 * 0.02 * 360,
}
metadict_model['Строительство городского водопровода'] = {
'Городской водопровод (смета)':1,
}
#----
# Строительство, городской водопровод:
# СП 31.13330.2012 Водоснабжение. Наружные сети и сооружения.
# http://docs.cntd.ru/document/1200093820
# СНиП 2.04.02-84* ВОДОСНАБЖЕНИЕ НАРУЖНЫЕ СЕТИ И СООРУЖЕНИЯ
# http://soyuzproekt.ru/ntd/879.htm
# Н. Н. АБРАМОВ "Водоснабжение"
# http://www.bibliotekar.ru/spravochnik-15/index.htm
metadict_model['Городской водопровод (смета)'] = {
# https://ru.wikisource.org/wiki/ЭСБЕ/Снабжение_городов_водой
# https://ru.wikipedia.org/wiki/Московский_водопровод
# Третий мытищинский водопровод
# https://alex-avr2.livejournal.com/212771.html
# http://tmp.avr.net.ru/m3/zimin1908.pdf
# Московский водопровод
# https://alex-avr2.livejournal.com/95935.html
# https://alex-avr2.livejournal.com/95734.html
'Конструкция городского водопровода (смета)':1,
#'Оборудование городского водопровода':1,
#'Отделка городского водопровода':1,
}
metadict_model['Конструкция городского водопровода (смета)'] = {
    # FIXME
    # Blow-off outlets are needed at the low points of the network and air valves at the high points.
    # In the Moscow waterworks of 1900 the street mains weighed 4 times as much as the city mains.
    # City size (10 000 inhabitants): 500x2500 metres (5x10 blocks)
    # The city main runs from the intake to the treatment and water-tower stations.
    # District mains cross the city crosswise in 3 places and close the ring.
    # Street mains run the length of the city (one per street)
'Водозаборная станция (смета)':1,
'Водоочистная станция (смета)':1,
'Водонапорная станция (смета)':1,
'Водоподъёмная станция (смета)':2,
'Водопровод городской (метр)':1000 + 1000,
'Водопровод районный (метр)':1000 + (1000 + 1500) * 2,
'Водопровод уличный (метр)':14 * 2500,
}
#----
# Construction, water treatment stations:
metadict_model['Водоочистная станция (смета)'] = {
# https://ru.wikisource.org/wiki/ЭСБЕ/Фильтр
# https://ru.wikipedia.org/wiki/Медленные_фильтры
'Конструкция водоочистной станции':1,
#'Оборудование водоочистной станции':1,
#'Отделка водоочистной станции':1,
}
metadict_model['Конструкция водоочистной станции'] = {
# http://www.another.kiev.ua/starinnoe-podzemnoe-vodoxranilishhe-xix-veka-v-kieve/
# https://mishanik-210.livejournal.com/1586.html
# -------------------------------|
# |..............................|
# |..............................|
# |==============================|
# |..............................|
# |..............................|
# |==============================|
# |..............................|
# |..............................|
# |==============================|
# |..............................|
# |..............................|
# -------------------------------|
'Внешняя стена водного резервуара (метр)':(61 * 2) + (6 * 2) * 4,
'Камера водного резервуара (6x60x3 метра)':4,
'Перегородка водного резервуара (метр)':60 * 3,
}
metadict_model['Оборудование водоочистной станции'] = {
'Медленный фильтр (6x60 метров)':4,
}
metadict_model['Медленный фильтр (6x60 метров)'] = {
    # Chamber 6x60x3 metres, filter area 360 sq. metres (860 cubic metres/day)
'-Очистка воды (кубометров/сутки)':2.4 * (6 * 60),
'Медленный фильтр (квадратный метр)':6 * 60,
}
#----
# Construction, water intake stations:
metadict_model['Водозаборная станция (смета)'] = {
    # FIXME
    # Finish this off. Nothing complicated here!
#'Конструкция водозаборной станции':1,
#'Оборудование водозаборной станции':1,
#'Отделка водозаборной станции':1,
}
#----
# Construction, water pumping stations:
metadict_model['Водоподъёмная станция (смета)'] = {
    # FIXME
    # Finish this off. Nothing complicated here!
#'Конструкция водоподъёмной станции':1,
#'Оборудование водоподъёмной станции':1,
#'Отделка водоподъёмной станции':1,
}
#----
# Construction, water towers:
metadict_model['Водонапорная станция (смета)'] = {
    # A buried basin; it should stand some 20 metres above the town.
    # It smooths the daytime peak in water demand, easing the load on the pumps.
'Конструкция водонапорной станции':1,
#'Оборудование водонапорной станции':1,
#'Отделка водонапорной станции':1,
}
metadict_model['Конструкция водонапорной станции'] = {
    # Reservoir 18x30x3 metres.
# ----------------|
# |...............|
# |...............|
# |===============|
# |...............|
# |...............|
# |===============|
# |...............|
# |...............|
# ----------------|
'Внешняя стена водного резервуара (метр)':(31 * 2) + (6 * 2) * 3,
'Камера водного резервуара (6x30x3 метра)':3,
'Перегородка водного резервуара (метр)':30 * 3,
}
metadict_model['Оборудование водонапорной станции'] = {
'Водный резервуар (6x30x3 метров)':3,
}
metadict_model['Водный резервуар (6x30x3 метров)'] = {
    '-Хранение питьевой воды (кубометров)':6 * 30 * 3,
}
#----
# Construction, water mains:
metadict_model['Водопровод магистральный (метр)'] = {
    # FIXME
    # Finish this off. Needs a complete rework!
    # The conduit should be reinforced concrete/clay/brick.
    # Vaults two bricks thick. A rubble/crushed-stone bed is mandatory.
    # A plain trench will not do here.
    # 200 000 inhabitants (5500 cubic metres/hour)
    # Pipe diameter 1.4 metres inside (2 metres outside). Trench profile: 2x3x5.
    # Trapezoid area: S = 1/2 * (a + b) * h
'Траншея (кубометр)':(1/2 * (2 + 3) * 5),
'Насыпь (кубометр)':(1/2 * (2 + 3) * 5) - 3.14159265 * (2 / 2) ** 2,
'Устройство 1400-мм кирпичной водопропускной трубы (метр)':1,
'Устройство гравийного основания под трубопроводы (кубометр)':2,
}
metadict_model['Водопровод городской (метр)'] = {
    # 20 000 inhabitants (450 cubic metres/hour)
    # Pipe diameter 0.4 metres inside (0.43 metres outside). Trench profile: 0.8x2x3.
'Разборка щебёночного шоссе (квадратный метр)':2,
'Траншея (кубометр)':(1/2 * (0.8 + 2) * 3),
'Прокладка 400-мм водопровода (метр)':1,
'Установка фасонных частей 400-мм водопровода (штук)':1 / 20,
'Насыпь (кубометр)':(1/2 * (0.8 + 2) * 3) \
- 3.14159265 * (0.43 / 2) ** 2,
'Вывоз грунта (кубометр)':3.14159265 * (0.43 / 2) ** 2,
'Восстановление щебёночного шоссе (квадратный метр)':2,
}
metadict_model['Водопровод районный (метр)'] = {
    # 5000 inhabitants (113 cubic metres/hour)
    # Pipe diameter 0.2 metres inside (0.23 metres outside). Trench profile: 0.6x2x3.
'Разборка булыжной мостовой (квадратный метр)':2,
'Траншея (кубометр)':(1/2 * (0.6 + 2) * 3),
'Прокладка 200-мм водопровода (метр)':1,
'Установка фасонных частей 200-мм водопровода (штук)':1 / 20,
'Насыпь (кубометр)':(1/2 * (0.6 + 2) * 3) \
- 3.14159265 * (0.23 / 2) ** 2,
'Вывоз грунта (кубометр)':3.14159265 * (0.23 / 2) ** 2,
'Восстановление булыжной мостовой (квадратный метр)':2,
}
metadict_model['Водопровод уличный (метр)'] = {
    # 1000 inhabitants (28 cubic metres/hour)
    # Pipe diameter 0.1 metres inside (0.12 metres outside). Trench profile: 0.5x2x3.
'Разборка булыжной мостовой (квадратный метр)':2,
'Траншея (кубометр)':(1/2 * (0.5 + 2) * 3),
'Прокладка 100-мм водопровода (метр)':1,
'Установка фасонных частей 100-мм водопровода (штук)':1 / 20,
'Насыпь (кубометр)':(1/2 * (0.5 + 2) * 3) \
- 3.14159265 * (0.12 / 2) ** 2,
'Вывоз грунта (кубометр)':3.14159265 * (0.12 / 2) ** 2,
'Восстановление булыжной мостовой (квадратный метр)':2,
}
#----
# Construction, rainwater collection:
metadict_model['Водоотвод с крыши (8x8 метров)'] = {
# https://upload.wikimedia.org/wikipedia/commons/1/15/Wei%C3%9Fes_Wohnhaus_in_Schwetzingen_2010.JPG
    # 1) Gutter of galvanised steel
    # 2) Downpipe from the roof
    # 3) Water cistern on a stand
    # 4) Supply pipe into the house
    # 60-80% of the falling moisture can be collected:
    # https://ru.wikisource.org/wiki/ЭСБЕ/Снабжение_городов_водой
    # With precipitation of 800 mm/year this gives per day: 0.8 / 360 * (8 * 8) * 0.7 = 0.1 cubic metres
'Прокладка 150-мм водосточной трубы (метр)':6 + 1,
'Кровля из оцинкованной стали (квадратный метр)':(8 + 8) * 2 * 0.2,
}
#----
# Construction, shaft wells:
metadict_model['Шахтный колодец (смета)'] = {
    # Yield 0.5-3 cubic metres/hour
# https://ru.wikisource.org/wiki/ЭСБЕ/Снабжение_городов_водой
# https://www.parthenon-house.ru/content/articles/index.php?article=5155
# http://gardenweb.ru/shakhtnye-kolodtsy
'Конструкция шахтного колодца':1,
#'Оборудование шахтного колодца':1,
#'Отделка шахтного колодца':1,
}
metadict_model['Конструкция шахтного колодца'] = {
# http://www.mukhin.ru/stroysovet/voda/2_06.html
    # 1) Pit 3.25 metres in diameter and 1.5 metres deep.
    # 2) A 1-2 metre layer of clay around the shaft down to a depth of 1.5 metres.
    # 3) Digging the well shaft 1.25 metres in diameter to a depth of 15 metres.
    # 4) Brickwork one brick thick (area computed from the shaft perimeter and depth)
    # 6) Clay-and-straw roof (conical, 1.5 metres high)
    # 7) Shaft ventilation.
'Котлован (кубометр)':3.14159265 * (((1.250 + 2) / 2) ** 2) * 1.5,
'Устройство глиняного замка (кубометр)':(3.14159265 * (((1.250 + 2) / 2) ** 2) * 1.5) \
- (3.14159265 * ((1.250 / 2) ** 2)) * 1.5,
'Сооружение шахтных колодцев копателем (кубометр)':(3.14159265 * ((1.250 / 2) ** 2)) * 15,
'Вывоз грунта (кубометр)':3.14159265 * (((1.250 + 2) / 2) ** 2) * 1.5 \
+ (3.14159265 * ((1.250 / 2) ** 2)) * 15,
'Кирпичная кладка сводов в 1 кирпич (квадратный метр)':(2 * 3.14159265 * (1.250 / 2)) * 15,
'Простая стропильная система (2x2 метра)':1,
'Соломенная кровля (квадратный метр)':3.14159265 * ((1.250 + 0.75) / 2) * 1.5,
'Прокладка 150-мм вытяжной трубы (метр)':2,
}
metadict_model['Оборудование шахтного колодца'] = {
'Устройство донного фильтра копателем (штука)':1,
'Медленный фильтр (квадратный метр)':(3.14159265 * ((1.250 / 2) ** 2)),
}
metadict_model['Отделка шахтного колодца'] = {
'|Механический насос (35 литров/минуту)':1,
'|Лампа светляковая (1200 Лм)':1,
'|Деревянная кадка (300 литров)':1,
'|Деревянная бочка (300 литров)':1,
'|Деревянное ведро (10 литров)':1,
}
#----
# Compost pits:
metadict_model['Люфт-клозет'] = {
# http://www.bibliotekar.ru/spravochnik-81/29.htm
    # A simple arrangement in which waste water is roughly cleaned and seeps into the soil,
    # while the solid waste stays in the near chamber of the compost pit.
'Конструкция люфт-клозета':1,
'Оборудование люфт-клозета':1,
}
metadict_model['Конструкция люфт-клозета'] = {
'Внешняя стена компостной ямы (метр)':(4 * 1) * 2,
'Камера компостной ямы (4x1x2 метра)':1,
}
metadict_model['Оборудование люфт-клозета'] = {
'Прокладка 150-мм трубопровода канализации (метр)':6,
'Медленный фильтр (квадратный метр)':2,
}
metadict_model['Внешняя стена компостной ямы (метр)'] = {
    # FIXME
    # The height should increase: the wall follows a slope.
    # Trench profile: 0.5x1x2.
    # Trapezoid area: S = 1/2 * (a + b) * h
'Траншея (кубометр)':(1/2 * (0.5 + 1) * 1.5),
'Обваловка (кубометр)':0.5 * (1/2 * (0.5 + 1) * 1.5),
'Вывоз грунта (кубометр)':0.5 * (1/2 * (0.5 + 1) * 1.5),
'Устройство основания под фундаменты (кубометр)':0.5 * 0.25,
'Устройство каменного ленточного фундамента (кубометр)':0.25 * 1.25,
'Гидроизоляция боковая цементная с жидким стеклом (квадратный метр)':2,
'Устройство глиняного замка (кубометр)':2 * 0.2,
}
metadict_model['Камера компостной ямы (4x1x2 метра)'] = {
# ------
# |== |
# ------
'Котлован (кубометр)':4 * 2,
'Вывоз грунта (кубометр)':0.75 * 4 * 2,
'Устройство подстилающего слоя (кубометр)':4 * 0.25,
'Гидроизоляция горизонтальная цементная с жидким стеклом (квадратный метр)':2,
'Устройство дощатых перегородок (квадратный метр)':4,
'Насыпь (кубометр)':0.25 * 4 * 2,
}
#----
# Complex building elements:
metadict_model['Камера водного резервуара (6x60x3 метра)'] = {
    # 1) Pit 6x60x4 and a pier foundation of rubble stone.
    # 2) Clay-and-sand bedding layer and a reinforced-concrete slab.
    # 3) 36 brick piers and two half-cylindrical vaults.
    # 4) Embankment from 0.25 of the excavated soil, 0.75 hauled away.
# https://upload.wikimedia.org/wikipedia/commons/5/54/Brockhaus_and_Efron_Encyclopedic_Dictionary_b70_862-2.jpg
# |====1==1==1==1==1==1==1==1==1==1==1==1==1==1==1==1==1==1====|
# |............................................................|
# |............................................................|
# 0....1..1..1..1..1..1..1..1..1..1..1..1..1..1..1..1..1..1....0
# |............................................................|
# |............................................................|
# --------------------------------------------------------------
'Котлован (кубометр)':(6 * 60 * 4) \
+ 36 * (1.2 * 1.2 * 1),
'Вывоз грунта (кубометр)':0.75 * ((6 * 60 * 4) \
+ 36 * (1.2 * 1.2 * 1)),
'Устройство основания под фундаменты (кубометр)':36 * (1.2 * 1.2 * 0.5),
'Устройство каменного столбового фундамента (кубометр)':36 * (1.2 * 1.2 * 0.5),
'Устройство подстилающего слоя (кубометр)':(6 * 60 * 0.5) \
- 36 * (1.2 * 1.2 * 0.5),
'Железобетон тяжёлый (кубометр)':(6 * 60 * 0.5) \
- 36 * (0.8 * 0.8 * 0.5),
'Гидроизоляция горизонтальная цементная с жидким стеклом (квадратный метр)':(6 * 60) \
- 36 * (0.8 * 0.8),
'Кирпичная кладка столбов в 3 кирпича (метр)':36 * 0.5,
'Кирпичная кладка столбов в 2 кирпича (метр)':36 * 3,
'Кирпичная кладка сводов в 0.5 кирпича (квадратный метр)':2 * (3.14159265 * 1.5) * 60,
'Насыпь (кубометр)':0.25 * ((6 * 60 * 4) \
+ 36 * (1.2 * 1.2 * 1)),
}
metadict_model['Камера водного резервуара (6x30x3 метра)'] = {
# |====1==1==1==1==1==1==1==1==1=|
# |..............................|
# |..............................|
# 0....1..1..1..1..1..1..1..1..1.0
# |..............................|
# |..............................|
# --------------------------------
'Котлован (кубометр)':(6 * 30 * 4) \
+ 18 * (1.2 * 1.2 * 1),
'Вывоз грунта (кубометр)':0.75 * ((6 * 30 * 4) \
+ 18 * (1.2 * 1.2 * 1)),
'Устройство основания под фундаменты (кубометр)':18 * (1.2 * 1.2 * 0.5),
'Устройство каменного столбового фундамента (кубометр)':18 * (1.2 * 1.2 * 0.5),
'Устройство подстилающего слоя (кубометр)':(6 * 30 * 0.5) \
- 18 * (1.2 * 1.2 * 0.5),
'Железобетон тяжёлый (кубометр)':(6 * 30 * 0.5) \
- 18 * (0.8 * 0.8 * 0.5),
'Гидроизоляция горизонтальная цементная с жидким стеклом (квадратный метр)':(6 * 30) \
- 18 * (0.8 * 0.8),
'Кирпичная кладка столбов в 3 кирпича (метр)':18 * 0.5,
'Кирпичная кладка столбов в 2 кирпича (метр)':18 * 3,
'Кирпичная кладка сводов в 0.5 кирпича (квадратный метр)':2 * (3.14159265 * 1.5) * 30,
'Насыпь (кубометр)':0.25 * ((6 * 30 * 4) \
+ 18 * (1.2 * 1.2 * 1)),
}
metadict_model['Внешняя стена водного резервуара (метр)'] = {
    # 1) Trench and a strip foundation of rubble stone.
    # 2) Wall heights: 0.5 metres at the base; 3 metres up to the water level; 1.5 metres in the vaults.
    # 3) External and internal waterproofing.
    # 4) Earth banking of the wall.
    # Trench profile: 1.2x3x4.5.
    # Trapezoid area: S = 1/2 * (a + b) * h
'Траншея (кубометр)':(1/2 * (1.2 + 3) * 4.5),
'Вывоз грунта (кубометр)':0.75 * (1/2 * (1.2 + 3) * 4.5),
'Устройство основания под фундаменты (кубометр)':1.2 * 0.5,
'Устройство каменного ленточного фундамента (кубометр)':1.2 * 0.5,
'Кирпичная кладка в 3 кирпича (квадратный метр)':0.5,
'Кирпичная кладка в 2.5 кирпича (квадратный метр)':3,
'Кирпичная кладка в 1.5 кирпича (квадратный метр)':1.5,
'Гидроизоляция боковая цементная с жидким стеклом (квадратный метр)':3 * 2,
'Устройство глиняного замка (кубометр)':4 * 0.5,
'Обваловка (кубометр)':0.25 * (1/2 * (1.2 + 3) * 4.5),
}
metadict_model['Перегородка водного резервуара (метр)'] = {
    # Chamber height: 3 metres.
'Кирпичная кладка в 1.5 кирпича (квадратный метр)':3,
'Гидроизоляция боковая цементная с жидким стеклом (квадратный метр)':3 * 2,
}
metadict_model['Медленный фильтр (квадратный метр)'] = {
    # Quartz sand (24 inches): 0.610 metres
    # Coarse sand (3 inches): 0.076 metres
    # Gravel (3 inches): 0.076 metres
    # Coarse gravel (4 inches): 0.102 metres
    # Large cobbles (8 inches): 0.203 metres
'Песок кварцевый (кубометр)':0.610,
'Песок крупный (кубометр)':0.076,
'Гравий мелкий (кубометр)':0.076,
'Гравий крупный (кубометр)':0.102,
'Камень бутовый (кубометр)':0.203,
}
| [
"[email protected]"
] | |
e504ebb5f478fb423b42fd1cbe28748625513ef9 | c93a0a6dedc8ebf100dd15eefc897457410e2d06 | /opsweb/resources/migrations/0008_cmdbmodel_dev_team.py | 334ca0cdc4e2e901f6fa80d9ece75cf34c848bee | [] | no_license | sungy2014/WS-OPS | efaab4ca8d3c56352c685508fe5b273daaedc2bb | 7563e40c130d0791ccacb259f7a71a9f276ca6c6 | refs/heads/master | 2020-03-11T12:25:42.030148 | 2018-04-11T12:44:02 | 2018-04-11T12:44:02 | 129,997,121 | 1 | 0 | null | 2018-04-18T03:14:03 | 2018-04-18T03:14:03 | null | UTF-8 | Python | false | false | 564 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-22 14:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('resources', '0007_cmdbmodel_ansible_playbook'),
]
operations = [
migrations.AddField(
model_name='cmdbmodel',
name='dev_team',
field=models.ManyToManyField(to='auth.Group', verbose_name='负责的开发组'),
),
]
| [
"root@172-17-134-23.(none)"
] | root@172-17-134-23.(none) |
51d68dfea5891cefb0d83811d3e1cff7af52b92b | f72ecf85bc1d6b4014af4b35f7677adb7c3a77f3 | /venv/lib/python3.7/heapq.py | c1d7b074887df5b805bb3d55fea8740954088e10 | [] | no_license | PropeReferio/covid19dashapp | cef5a803a26a00fc5a7adca57625d7f3de8710f8 | aea672aca23e0d6782080c966b24da6d826e1f91 | refs/heads/master | 2022-07-14T23:09:21.063273 | 2020-11-01T18:46:14 | 2020-11-01T18:46:14 | 253,976,374 | 0 | 0 | null | 2022-06-22T01:41:02 | 2020-04-08T03:32:05 | Python | UTF-8 | Python | false | false | 41 | py | /home/bo/anaconda3/lib/python3.7/heapq.py | [
"[email protected]"
] | |
f7931575c366e22a71c78e7146d1397848ab5a87 | 92bf9ddd7b92e7ed73fa6989164700b2be3657b8 | /Project1/download/google-cloud-sdk/.install/.backup/lib/surface/config/configurations/describe.py | 0b368769e222fe9b76407d71ace98df3c1c32661 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/8220-lab | bc991424557ff46f325d4611a84d02560ba5a6cb | 3f0ca82028962e5b1c0f4a2c4a2390ce6603e11c | refs/heads/master | 2022-11-19T22:31:54.741707 | 2018-01-07T16:56:47 | 2018-01-07T16:56:47 | 282,337,970 | 0 | 0 | null | 2020-07-25T00:01:24 | 2020-07-25T00:01:23 | null | UTF-8 | Python | false | false | 2,295 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import named_configs
from googlecloudsdk.core import properties
class Describe(base.Command):
"""Describes a named configuration by listing its properties."""
detailed_help = {
'DESCRIPTION': """\
{description}
See `gcloud topic configurations` for an overview of named
configurations.
""",
'EXAMPLES': """\
          To describe an existing named configuration, run:
$ {command} my_config
This is similar in content to:
$ gcloud config configurations activate my_config
$ gcloud config list
""",
}
@staticmethod
def Args(parser):
"""Adds args for this command."""
parser.add_argument(
'configuration_name',
        help='Configuration name to describe')
parser.add_argument(
'--all', action='store_true',
help='Include unset properties in output.')
def Run(self, args):
fname = named_configs.GetPathForConfigName(args.configuration_name)
if not named_configs.IsPathReadable(fname):
raise named_configs.NamedConfigLoadError(
'Reading named configuration [{0}] failed because [{1}] cannot '
'be read.'.format(args.configuration_name, fname))
return properties.VALUES.AllValues(
list_unset=args.all,
properties_file=properties.PropertiesFile([fname]),
only_file_contents=True)
def Display(self, _, result):
if not result:
log.err.Print('(empty configuration)')
properties.DisplayProperties(log.out, result)
| [
"[email protected]"
] | |
64bb9f225783b606da3d8267a0ac7d33b510a04b | 2430b2a50efec6eebf27c0162b11d10d88f62729 | /pyprob/__init__.py | 57d07c2a883967767c0ab26beb5ea4593b133414 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | feynmanliang/pyprob | 60d27e67c02e96f7a8116d9f1c5bdf1c374d908a | 16e345fde1d4305138bc909b087c81ad0f668cc5 | refs/heads/master | 2022-11-28T16:28:19.921825 | 2020-05-25T22:28:45 | 2020-05-25T22:28:45 | 268,595,724 | 0 | 0 | null | 2020-06-01T18:05:21 | 2020-06-01T18:05:21 | null | UTF-8 | Python | false | false | 336 | py | __version__ = '1.1.3'
from .util import TraceMode, PriorInflation, InferenceEngine, InferenceNetwork, ImportanceWeighting, Optimizer, LearningRateScheduler, ObserveEmbedding, set_verbosity, set_device, seed
from .state import sample, observe, tag
from .address_dictionary import AddressDictionary
from .model import Model, RemoteModel
| [
"[email protected]"
] | |
7eba8f570b7af1fd4e912d31ca096771effd2c08 | 99e0fef58ec7d3985f7471d0ab021333f8ea8c95 | /output_head_tables.py | 7eedb6ba85940104fdb796ed0260cc5c87a52a95 | [] | no_license | deBroglieeeen/get_pair_noun_in_corpus_2 | 176d6d1ea69a0947dbf7fe991525aafaab5d1e50 | 5667598604158c18f096c731f59780c83f79f8f7 | refs/heads/main | 2023-02-24T17:05:16.111928 | 2021-01-30T07:53:51 | 2021-01-30T07:53:51 | 326,991,591 | 0 | 0 | null | 2021-01-18T02:15:26 | 2021-01-05T12:26:49 | Python | UTF-8 | Python | false | false | 229 | py | import pandas as pd
import scipy as sp
import scipy.stats
# コンマ区切りのテキストデータを読み込む
data = pd.read_csv("output/df_sample2.tsv", sep='/t')
data.head(15).to_csv('output/head_alldata_sample.csv')
| [
"[email protected]"
] | |
cb0026bf57ccc9abc71541d4c3d1f690f344d7ae | 47aaa3f1fa5764779e5246fa3b765adaaac15bd1 | /distributed_jobman/parsers/config.py | f18760451974217f79da36fcfa3e1de8d8f31456 | [] | no_license | bouthilx/distributed-jobman | a3ec4958001b052a8327416b4be268f55dea2bf7 | d20aeda23bb9137445f754c8542d2f7e328a7fae | refs/heads/master | 2021-01-24T21:12:47.077725 | 2016-02-18T16:02:32 | 2016-02-18T16:05:16 | 49,673,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | from ConfigParser import ConfigParser, Error
import os
p2_config = ConfigParser()
p2_config.read(os.path.join(os.environ["HOME"], ".distributed_jobman.rc"))
default_values = dict(cache_timeout=str(60 * 5))
keys = ["username", "password", "address", "name", "cache_timeout"]
database = dict()
for key in keys:
value = p2_config.get("database", key, vars=default_values)
if value is None:
        raise ValueError("Option %s must be set in configuration file "
                         "~/.distributed_jobman.rc" % key)
database[key] = value
database["cache_timeout"] = float(database["cache_timeout"])
scheduler_types = ["multi-gpu", "cluster"]
scheduler = dict(type=p2_config.get("scheduler", "type"))
if scheduler["type"] not in scheduler_types:
raise Error("Invalid scheduler type: %s" % scheduler["type"])
config = dict(database=database, scheduler=scheduler)
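# Example ~/.distributed_jobman.rc (illustrative values only; the section and
# option names come from the parsing code above):
#
#   [database]
#   username = jobman
#   password = secret
#   address = localhost
#   name = jobman_db
#   cache_timeout = 300
#
#   [scheduler]
#   type = cluster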
| [
"[email protected]"
] | |
24c5484f67c0ebe9391bd91e453f7b27f8619284 | 01b7cc0017c81c99d1da1c37c6a5dcb0bf4af9a5 | /python/PythonBinding.py | d2d8ed69c64c69724515739b1392ad016908ff42 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lsst-camera-dh/jh-ccs-utils | 8c366c2cf3883944373a2aed02ee328c823a5cc7 | 4948df295311dff95d2b7d8e11f9ba392cd6b933 | refs/heads/master | 2022-03-09T09:37:31.640529 | 2022-01-17T03:27:47 | 2022-01-17T03:27:47 | 87,144,848 | 0 | 0 | NOASSERTION | 2022-01-17T03:27:48 | 2017-04-04T03:38:53 | Python | UTF-8 | Python | false | false | 4,898 | py | """
Socket connection interface to CCS Jython interpreter.
"""
import sys
import time
import re
import socket
import threading
import uuid
__all__ = ['CcsJythonInterpreter', 'CcsException', 'CcsExecutionResult']
class CcsExecutionResult:
"""Results class."""
def __init__(self, thread):
self.thread = thread
def getOutput(self):
"""Return the result of a jython command as a string."""
while self.thread.running:
time.sleep(0.1)
return self.thread.execution_output
class CcsException(Exception):
"""Exception class for CCS Jython interface."""
def __init__(self, value):
super(CcsException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class CcsJythonInterpreter:
"""Interface class to CCS Jython interpreter."""
def __init__(self, name=None, host=None, port=4444):
self.port = port
if host is None:
# Get local machine name
self.host = socket.gethostname()
else:
self.host = host
host_and_port = '{}:{}'.format(self.host, self.port)
try:
self.socket_connection = self._socket_connection()
print('Connected to CCS Python interpreter on host:port',
host_and_port)
except Exception as eobj:
print(eobj)
raise CcsException("Could not connect to CCS Python Interpreter " +
"on host:port " + host_and_port)
if name is not None:
name = name.replace("\n", "")
self.syncExecution("initializeInterpreter " + name)
def _socket_connection(self):
sc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sc.connect((self.host, self.port))
connectionResult = sc.recv(1024).decode('utf-8')
if "ConnectionRefused" in connectionResult:
raise CcsException("Connection Refused")
return sc
def aSyncExecution(self, statement):
return self.sendInterpreterServer(statement)
def syncExecution(self, statement):
result = self.sendInterpreterServer(statement)
# Calling .getOutput() here causes the object to wait for the
# underlying thread to stop running.
result.getOutput()
return result
def aSyncScriptExecution(self, filename):
with open(filename, "r") as fd:
fileContent = fd.read()
return self.sendInterpreterServer(fileContent)
def syncScriptExecution(self, filename, setup_commands=(), verbose=False):
if verbose and setup_commands:
print("Executing setup commands for", filename)
for command in setup_commands:
if verbose:
print(command)
self.syncExecution(command)
if verbose:
print("Executing %s..." % filename)
with open(filename, "r") as fd:
fileContent = fd.read()
result = self.sendInterpreterServer(fileContent)
# Calling .getOutput() here causes the object to wait for the
# underlying thread to stop running.
result.getOutput()
return result
def sendInterpreterServer(self, content):
thread_id = str(uuid.uuid4())
executor_thread = CcsPythonExecutorThread(thread_id,
self.socket_connection)
return executor_thread.executePythonContent(content)
class CcsPythonExecutorThread:
def __init__(self, thread_id, socket_connection):
self.socket_connection = socket_connection
self.thread_id = thread_id
self.output_thread = threading.Thread(target=self.listenToSocketOutput)
self.java_exceptions = []
def executePythonContent(self, content):
self.running = True
self.output_thread.start()
content = ("startContent:" + self.thread_id + "\n" +
content + "\nendContent:" + self.thread_id + "\n")
self.socket_connection.send(content.encode('utf-8'))
return CcsExecutionResult(self)
def listenToSocketOutput(self):
re_obj = re.compile(r'.*java.*[Ee]xception.*')
self.execution_output = ""
while self.running:
try:
output = self.socket_connection.recv(1024).decode('utf-8')
except Exception as eobj:
print(eobj)
raise CcsException("Communication Problem with Socket")
for item in output.split('\n'):
if re_obj.match(item):
self.java_exceptions.append(item)
if "doneExecution:" + self.thread_id not in output:
sys.stdout.write(output)
sys.stdout.flush()
self.execution_output += output
else:
self.running = False
del self.output_thread
| [
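# Minimal usage sketch (illustrative; assumes a CCS Jython server is
# listening on the default port 4444 of the local host):
#
#   ccs = CcsJythonInterpreter(name='demo')
#   result = ccs.syncExecution("print 'hello from CCS'")
#   print(result.getOutput())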
"[email protected]"
] | |
580b18797f6bcd128bf024691e448bb0b188ad18 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fish1_20200805130243.py | e999797ad429e042ebb73a7054817607af8ed019 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | def fish(A,B):
# we place fish moving downwards to a downstream stack
# then when its not empty we'll check with the empty at A[i]
# if it eats that fish we deduct it from the alive fish and del from A
# otherwise we shall pop from the down stream stack
downStream = []
j = 0
aliveFish = len(A)
fishRemoved = 0
for j in range(len(B)):
if B[j] == 0:
while downStream !=[] and :
print(fish([4,3],[0,1]))
# print(fish([4,3,2,1,5],[0,1,0,0,0])) | [
"[email protected]"
] | |
30e1d63614fa8d56d2ca697cb2da652ee3a00995 | e836275adf8adca9b77acdd3d25bac157592a995 | /dyconnmap/cluster/__init__.py | 958f031b8568d142c88bb25e94a002ca0a8d42f5 | [
"BSD-3-Clause"
] | permissive | makism/dyconnmap | 3de6f482d1370bf25ec3813ddf576b675ed99d9e | cbef247e635d55cb1489ba1e429d9d472b501b56 | refs/heads/master | 2023-08-03T19:30:40.779333 | 2022-03-14T18:24:16 | 2022-03-14T18:24:16 | 98,643,787 | 67 | 25 | BSD-3-Clause | 2023-07-24T04:49:03 | 2017-07-28T11:37:17 | Python | UTF-8 | Python | false | false | 485 | py | # -*- coding: utf-8 -*-
"""
"""
# Author: Avraam Marimpis <[email protected]>
from .ng import NeuralGas
from .mng import MergeNeuralGas
from .rng import RelationalNeuralGas
from .gng import GrowingNeuralGas
from .som import SOM
from .umatrix import umatrix
from .validity import ray_turi, davies_bouldin
__all__ = [
"NeuralGas",
"MergeNeuralGas",
"RelationalNeuralGas",
"GrowingNeuralGas",
"SOM",
"umatrix",
"ray_turi",
"davies_bouldin",
]
| [
"[email protected]"
] | |
7533ca90907b697d0dc23a74d914beb543005ff5 | 03f6ad21c4332b9b26dfb11ed04e63bdb9236b3c | /codegen/funcs2_testgen.py | edada136932bf16c484a72dc13c4369ce9f380ad | [
"Apache-2.0"
] | permissive | m1griffin/arrayfunc | ddf9ea9c8fa363f79babd788c8d0428ede8dfc60 | c04561c5d565ae8d3ee776783bfb34b242deca93 | refs/heads/master | 2023-08-05T00:09:27.530893 | 2023-07-19T12:46:37 | 2023-07-19T12:46:37 | 40,577,669 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,190 | py | #!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Purpose: Generate the unit tests for math functions which use two
# input parameters.
# Language: Python 3.5
# Date: 08-Dec-2017
#
###############################################################################
#
# Copyright 2014 - 2017 Michael Griffin <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# ==============================================================================
import itertools
import codegen_common
# ==============================================================================
# ==============================================================================
# This template is for operators which use a second numeric parameter.
test_template = '''
##############################################################################
class %(funclabel)s_general_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using numeric
data %(test_op_y)s.
test_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
########################################################
def test_%(funclabel)s_basic_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a3(self):
"""Test %(funclabel)s as *array-num-none* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, testval, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a4(self):
"""Test %(funclabel)s as *array-num-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, testval, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b3(self):
"""Test %(funclabel)s as *array-num-array* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, testval, dataout, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b4(self):
"""Test %(funclabel)s as *array-num-array* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, testval, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c3(self):
"""Test %(funclabel)s as *num-array-none* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(testval, data1, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c4(self):
"""Test %(funclabel)s as *num-array-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(testval, data1, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d3(self):
"""Test %(funclabel)s as *num-array-array* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(testval, data1, dataout, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d4(self):
"""Test %(funclabel)s as *num-array-array* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(testval, data1, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
				# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2)
for dataoutitem, expecteditem in zip(data1, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
			# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e3(self):
"""Test %(funclabel)s as *array-array-none* for basic function with array limit - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, data2, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e4(self):
"""Test %(funclabel)s as *array-array-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, data2, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e5(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e6(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e7(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, data2, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
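# ==============================================================================

# An illustrative sketch of the substitution keys the templates in this module
# expect. The values below are hypothetical and exist only to show the shape
# of the mapping; the generator builds the real mappings from its function
# definition data. Each template is a plain Python format string, expanded
# once per function and array type code with a single '%' substitution.
_example_template_subs = {
	'funclabel' : 'copysign',				# Used in generated class and method names.
	'funcname' : 'copysign',				# The arrayfunc function under test.
	'pyoperator' : 'math.copysign',			# Python equivalent used for expected values.
	'typelabel' : 'f',						# Array type label used in class names.
	'typecode' : 'f',						# array.array type code.
	'test_op_x' : '-3.0, -2.0, 2.0, 3.0',	# First parameter test data.
	'test_op_y' : '-2.0, 2.0',				# Second parameter test data.
}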
# ==============================================================================
# The template used to generate the tests for testing invalid parameter types.
param_invalid_template = '''
##############################################################################
class %(funclabel)s_param_errors_%(typelabel)s(unittest.TestCase):
"""Test for invalid parameters.
param_invalid_template
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.floatarray1 = array.array('%(typecode)s', [%(test_op_x)s])
self.floatarray2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.floatarray1)])
arraysize = len(self.floatarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
# Create some integer array equivalents.
self.intarray1 = array.array('i', [int(x) for x in self.floatarray1])
self.intarray2 = array.array('i', [int(x) for x in self.floatarray2])
self.intdataout = array.array('i', [int(x) for x in self.dataout])
########################################################
def test_%(funclabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testfloat)
########################################################
def test_%(funclabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testint)
########################################################
def test_%(funclabel)s_array_num_none_a3(self):
"""Test %(funclabel)s as *array-num-none* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
intarray1 = copy.copy(self.intarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testint)
########################################################
def test_%(funclabel)s_array_num_none_a4(self):
"""Test %(funclabel)s as *array-num-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, matherrors='a')
########################################################
def test_%(funclabel)s_array_num_none_a5(self):
"""Test %(funclabel)s as *array-num-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, maxlen='a')
########################################################
def test_%(funclabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testfloat, self.dataout)
########################################################
def test_%(funclabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, testint, self.dataout)
########################################################
def test_%(funclabel)s_array_num_array_b3(self):
"""Test %(funclabel)s as *array-num-array* for integer output array - Array code %(typelabel)s.
"""
for testfloat in self.floatarray2:
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.intdataout)
########################################################
def test_%(funclabel)s_array_num_array_b4(self):
"""Test %(funclabel)s as *array-num-array* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, testint, self.intdataout)
########################################################
def test_%(funclabel)s_array_num_array_b5(self):
"""Test %(funclabel)s as *array-num-array* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_array_num_array_b6(self):
"""Test %(funclabel)s as *array-num-array* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, intarray1)
########################################################
def test_%(funclabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, floatarray1)
########################################################
def test_%(funclabel)s_num_array_none_c3(self):
"""Test %(funclabel)s as *num-array-none* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, intarray1)
########################################################
def test_%(funclabel)s_num_array_none_c4(self):
"""Test %(funclabel)s as *num-array-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, floatarray1, matherrors='a')
########################################################
def test_%(funclabel)s_num_array_none_c5(self):
"""Test %(funclabel)s as *num-array-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, floatarray1, maxlen='a')
########################################################
def test_%(funclabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout)
########################################################
def test_%(funclabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, self.floatarray1, self.dataout)
########################################################
def test_%(funclabel)s_num_array_array_d3(self):
"""Test %(funclabel)s as *num-array-array* for integer output array - Array code %(typelabel)s.
"""
for testfloat in self.floatarray2:
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.intdataout)
########################################################
def test_%(funclabel)s_num_array_array_d4(self):
"""Test %(funclabel)s as *num-array-array* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, self.intarray1, self.intdataout)
########################################################
def test_%(funclabel)s_num_array_array_d5(self):
"""Test %(funclabel)s as *num-array-array* for matherrors='a' - Array code %(typelabel)s.
"""
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_num_array_array_d6(self):
"""Test %(funclabel)s as *num-array-array* for maxlen='a' - Array code %(typelabel)s.
"""
testfloat = self.floatarray2[0]
testmaxlen = len(self.floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout, maxlen=testmaxlen)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for integer array - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.intarray2)
########################################################
def test_%(funclabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.floatarray2)
########################################################
def test_%(funclabel)s_array_array_none_e3(self):
"""Test %(funclabel)s as *array-array-none* for all integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.intarray2)
########################################################
def test_%(funclabel)s_array_array_none_e4(self):
"""Test %(funclabel)s as *array-array-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, matherrors=True)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, matherrors='a')
########################################################
def test_%(funclabel)s_array_array_none_e5(self):
"""Test %(funclabel)s as *array-array-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, maxlen=testmaxlen)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, maxlen='a')
########################################################
def test_%(funclabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.intarray2, self.dataout)
########################################################
def test_%(funclabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.floatarray2, self.dataout)
########################################################
def test_%(funclabel)s_array_array_array_f3(self):
"""Test %(funclabel)s as *array-array-array* for integer output array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.intdataout)
########################################################
def test_%(funclabel)s_array_array_array_f4(self):
"""Test %(funclabel)s as *array-array-array* for all integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.intarray2, self.intdataout)
########################################################
def test_%(funclabel)s_array_array_array_f5(self):
"""Test %(funclabel)s as *array-array-array* for matherrors='a' - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_array_array_array_f6(self):
"""Test %(funclabel)s as *array-array-array* for maxlen='a' - Array code %(typelabel)s.
"""
testmaxlen = len(self.floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, maxlen=testmaxlen)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_no_params_g1(self):
"""Test %(funclabel)s with no parameters - Array code %(typelabel)s.
"""
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s()
##############################################################################
'''
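# ==============================================================================

# A hedged instantiation example: expanding param_invalid_template with the
# hypothetical mapping sketched earlier yields the complete source text of one
# unittest class, ready to be written into the output test module. Unused keys
# in the mapping are simply ignored by '%' formatting with a dict.
_param_invalid_example = param_invalid_template % _example_template_subs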
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# when exceptions are expected.
nan_data_error_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using parameter %(errordata)s.
nan_data_error_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except Exception:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.dataok1, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
dataok2 = copy.copy(self.dataok2)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, dataok2)
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(dataok1, self.errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.dataok1, self.dataok2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
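# ==============================================================================

# A standalone sketch (not used by the generator) of the assertEqual
# customisation the templates above rely on. addTypeEqualityFunc registers a
# per-type comparison, so assertEqual on two floats routes through the
# NaN-aware, tolerance-based check instead of plain '=='. Note that the
# doubled '%%' inside the template collapses to a single '%' on expansion;
# the expanded form is what appears below.
import math
import unittest

class _FloatAssertSketch(unittest.TestCase):
	def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
		"""NaN compares equal to NaN; everything else uses math.isclose."""
		if math.isnan(dataoutitem) and math.isnan(expecteditem):
			return
		if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
			raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))

	def test_nan_compares_equal(self):
		self.addTypeEqualityFunc(float, self.FloatassertEqual)
		self.assertEqual(float('nan'), float('nan'))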
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# when exceptions are not expected.
nan_data_noerror_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_noerrors_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using parameter %(errordata)s.
nan_data_noerror_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except Exception:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(errordata, testval)
for dataoutitem, expecteditem in zip(errordata, self.expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, self.expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedep):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(testval, errordata)
for dataoutitem, expecteditem in zip(errordata, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
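# ==============================================================================

# A minimal sketch of the PyOp fallback pattern defined in the templates
# above: try the Python math operator first, and substitute a default where
# it raises (a math domain error, for instance). This mirrors how the
# generated tests model arrayfunc's matherrors=True behaviour in pure Python.
import math

def _pyop_sketch(op, x, y, default):
	"""Return op(x, y), or default if the operation raises."""
	try:
		return op(x, y)
	except (ValueError, OverflowError):
		return default

# math.pow(-1.0, 0.5) raises ValueError (a domain error), so the NaN default
# is returned instead.
assert math.isnan(_pyop_sketch(math.pow, -1.0, 0.5, math.nan))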
# ==============================================================================
# The template used to generate the tests for inf, -inf in data arrays
# when exceptions are expected. This is a special version for fmod.
nan_data_fmod_inf_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_noerrors_%(typelabel)s(unittest.TestCase):
"""Test for fmod(x, y) operation using parameter %(errordata)s.
For math.fmod:
if x=nan, the result is always nan
if y=nan, the result is always nan
if x=inf or -inf, the result is always an error
if y=inf or -inf, the result is OK
For our purposes here, we treat a "NaN" output as an error even if
"math.fmod" does not.
nan_data_fmod_inf_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
# A "1" suffix means the data is meant for the first parameter.
# A "2" suffix means the data is meant for the second parameter.
self.okarray1 = array.array('%(typecode)s', [%(test_op_x)s])
self.okarray2 = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle([%(test_op_y)s]), self.okarray1)])
# This is how long the test arrays should be.
testarraysize = len(self.okarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, testarraysize))
self.errorarray = array.array('%(typecode)s', [float('%(errordata)s')] * testarraysize)
self.errorparam = float('%(errordata)s')
# When error data is calculated with error checking off, the result is
# always NaN.
self.nanresult = [math.nan] * testarraysize
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errorarray = copy.copy(self.errorarray)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errorarray, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
# The output goes into the first array.
arrayfunc.%(funcname)s(errorarray, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errorarray, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a3(self):
"""Test %(funclabel)s as *array-num-array* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errorarray = copy.copy(self.errorarray)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errorarray, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a4(self):
"""Test %(funclabel)s as *array-num-array* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errorarray, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a5(self):
"""Test %(funclabel)s as *array-num-none* for error number with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorparam)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a6(self):
"""Test %(funclabel)s as *array-num-none* for error number with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a7(self):
"""Test %(funclabel)s as *array-num-array* for error number with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a8(self):
"""Test %(funclabel)s as *array-num-array* for error number with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b1(self):
"""Test %(funclabel)s as *num-array-none* for error number with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray2 = copy.copy(self.okarray2)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, okarray2)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorparam, okarray2)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b2(self):
"""Test %(funclabel)s as *num-array-none* for error number with error check off - Array code %(typelabel)s.
"""
# The output goes into the first array.
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray2, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b3(self):
"""Test %(funclabel)s as *num-array-array* for error number with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.okarray2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b4(self):
"""Test %(funclabel)s as *num-array-array* for error number with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b5(self):
"""Test %(funclabel)s as *num-array-none* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
# The output goes into the first array.
arrayfunc.%(funcname)s(testval, errorarray)
for dataoutitem, expecteditem in zip(errorarray, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b6(self):
"""Test %(funclabel)s as *num-array-none* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
# The output goes into the first array.
arrayfunc.%(funcname)s(testval, errorarray, matherrors=True)
for dataoutitem, expecteditem in zip(errorarray, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b7(self):
"""Test %(funclabel)s as *num-array-array* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
arrayfunc.%(funcname)s(testval, self.errorarray, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b8(self):
"""Test %(funclabel)s as *num-array-array* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
arrayfunc.%(funcname)s(testval, self.errorarray, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c1(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check on - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.okarray1, self.okarray2)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorarray, self.okarray2)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c2(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check off - Array code %(typelabel)s.
"""
# The output goes into the first array.
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, matherrors=True)
for dataoutitem, expecteditem in zip(self.errorarray, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c3(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check on - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.okarray1, self.okarray2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c4(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c5(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorarray)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c6(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c7(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c8(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# Used for pow only.
nan_data_powerror_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for pow using parameter %(errordata)s.
nan_data_powerror_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval)
# This is the actual test. When the test value parameter is 0,
# no error is expected. Any other value should raise an error.
if testval != 0.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
else:
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedep):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval, self.dataout)
# This is the actual test. When the test value parameter is 0,
# no error is expected. Any other value should raise an error.
if testval != 0.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
else:
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedep):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, dataok1)
# This is the actual test. When testing for errors, the result
# will depend upon whether the test is for nan or inf, and
# what numeric values are involved.
# The template auto-generating this unit test is re-used for
# different test values, so we need a conditional test for this.
if '%(errordata)s' == 'nan' and testval != 1.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
elif '%(errordata)s' == 'inf' and ((testval < -1.0) or (testval > 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
elif '%(errordata)s' == '-inf' and ((testval > -1.0) and (testval < 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
else:
arrayfunc.%(funcname)s(testval, errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedpe):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.dataok1, self.dataout)
# This is the actual test. When testing for errors, the result
# will depend upon whether the test is for nan or inf, and
# what numeric values are involved.
# The template auto-generating this unit test is re-used for
# different test values, so we need a conditional test for this.
if '%(errordata)s' == 'nan' and testval != 1.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
elif '%(errordata)s' == 'inf' and ((testval < -1.0) or (testval > 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
elif '%(errordata)s' == '-inf' and ((testval > -1.0) and (testval < 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
else:
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedpe):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
dataok2 = copy.copy(self.dataok2)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, dataok2)
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(dataok1, self.errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.dataok1, self.dataok2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
'''
##############################################################################
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# specifically for copysign.
nan_data_copysign_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for copysign function operation using parameter %(errordata)s.
nan_data_copysign_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.okarray1 = array.array('%(typecode)s', [%(test_op_y)s])
# This is the same data, but with signs reversed.
self.okarray2 = array.array('%(typecode)s', [-x for x in [%(test_op_y)s]])
arraysize = len(self.okarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(x, testval) for x in errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(x, testval) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(testval, x) for x in errordata]
arrayfunc.%(funcname)s(testval, errordata)
for dataoutitem, expecteditem in zip(errordata, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(testval, x) for x in errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
                    # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# ==============================================================================
# These are all the test code templates.
test_templates = {'test_template' : test_template,
'nan_data_error_template' : nan_data_error_template,
'nan_data_noerror_template' : nan_data_noerror_template,
'nan_data_powerror_template' : nan_data_powerror_template,
'nan_data_fmod_inf_template' : nan_data_fmod_inf_template,
'nan_data_copysign_template' : nan_data_copysign_template,
}
# ==============================================================================
# Read in the op codes.
opdata = codegen_common.ReadINI('affuncdata.ini')
# Filter out the desired math functions.
funclist = [(x,dict(y)) for x,y in opdata.items() if y.get('test_op_templ') == 'test_template']
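# Hypothetical shape of one filtered entry, to show the fields the generator
# loop below reads (real names and values come from affuncdata.ini; the ones
# here are placeholders only):
#   ('somefunc', {'test_op_templ': 'test_template',
#                 'pyoperator': 'math.somefunc',
#                 'test_op_x': '...', 'test_op_y': '...',
#                 'test_nan_default': '0.0',
#                 'test_nan_data_template': 'nan_data_error_template',
#                 'test_inf_data_template': 'nan_data_noerror_template',
#                 'test_ninf_data_template': 'nan_data_noerror_template'})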
# ==============================================================================
# This defines the module name.
modulename = 'arrayfunc'
# Import the array module for testing.
arrayimport = 'import array'
for funcname, func in funclist:
filenamebase = 'test_' + funcname
filename = filenamebase + '.py'
headerdate = codegen_common.FormatHeaderData(filenamebase, '09-Dec-2017', funcname)
# Add additional header data.
headerdate['modulename'] = modulename
headerdate['arrayimport'] = arrayimport
# One function (one output file).
with open(filename, 'w') as f:
# The copyright header.
f.write(codegen_common.HeaderTemplate % headerdate)
# Check each array type.
for functype in codegen_common.floatarrays:
testtemplate = test_templates[func['test_op_templ']]
# Basic tests.
funcdata = {'funclabel' : funcname, 'funcname' : funcname, 'pyoperator' : func['pyoperator'],
'typelabel' : functype, 'typecode' : functype, 'test_op_x' : func['test_op_x'],
'test_op_y' : func['test_op_y']}
f.write(testtemplate % funcdata)
# Test for invalid parameters. One template should work for all
# functions of this style.
f.write(param_invalid_template % funcdata)
# NaN, inf, -inf tests.
funcdata = {'funclabel' : funcname, 'funcname' : funcname, 'pyoperator' : func['pyoperator'],
'typelabel' : functype, 'typecode' : functype, 'test_op_x' : func['test_op_x'],
'test_op_y' : func['test_op_y'],
'test_nan_default' : func['test_nan_default']
}
# NaN
testtemplate = test_templates[func['test_nan_data_template']]
funcdata['errorlabel'] = 'NaN'
funcdata['errordata'] = 'nan'
f.write(testtemplate % funcdata)
# inf
testtemplate = test_templates[func['test_inf_data_template']]
funcdata['errorlabel'] = 'inf'
funcdata['errordata'] = 'inf'
f.write(testtemplate % funcdata)
# -inf
testtemplate = test_templates[func['test_ninf_data_template']]
funcdata['errorlabel'] = 'ninf'
funcdata['errordata'] = '-inf'
f.write(testtemplate % funcdata)
f.write(codegen_common.testendtemplate % {'funcname' : funcname, 'testprefix' : 'af'})
# ==============================================================================
| [
"[email protected]"
] | |
9407a3410c3adf54c911ab96278515594e083f7c | 8cd15fba24b6dfa431f3764932101969f5fb524f | /JAMediaVideo/gtk2/Globales.py | 814c31044263af9b38ece76b1e5a3998450b5472 | [] | no_license | srevinsaju/JAMediaSuite | c872b4781657bf1bcf63908f71abeca799b8c666 | 1813d1205cf31f89be3c4512eb495baed427494f | refs/heads/master | 2020-12-04T12:14:53.794749 | 2019-01-05T12:52:13 | 2019-01-05T12:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Globals.py by:
# Flavio Danesse <[email protected]>
# Uruguay
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
def get_ip():
"""
    Returns the machine's IP address on the network (broadcast range).
"""
import socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("google.com", 80))
ret = s.getsockname()[0]
s.close()
return ret
except:
return ""
def get_color(color):
"""
    Returns predefined colors.
"""
from gtk import gdk
colors = {
"GRIS": gdk.Color(60156, 60156, 60156),
"AMARILLO": gdk.Color(65000, 65000, 40275),
"NARANJA": gdk.Color(65000, 26000, 0),
"BLANCO": gdk.Color(65535, 65535, 65535),
"NEGRO": gdk.Color(0, 0, 0),
"ROJO": gdk.Color(65000, 0, 0),
"VERDE": gdk.Color(0, 65000, 0),
"AZUL": gdk.Color(0, 0, 65000),
}
return colors.get(color, None)
def get_colors(key):
from gtk import gdk
_dict = {
"window": "#ffffff",
"barradeprogreso": "#778899",
"toolbars": "#f0e6aa",
"drawingplayer": "#000000",
}
return gdk.color_parse(_dict.get(key, "#ffffff"))
def describe_archivo(archivo):
"""
    Returns the type of a file (image, video, text).
    -z, --uncompress to look inside zip files.
"""
import commands
datos = commands.getoutput('file -ik %s%s%s' % ("\"", archivo, "\""))
retorno = ""
for dat in datos.split(":")[1:]:
retorno += " %s" % (dat)
return retorno
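# Illustrative example (the exact `file -ik` output format is an assumption):
# for a PNG image the command prints something like
# 'foto.png: image/png; charset=binary', so this function would return
# ' image/png; charset=binary'.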
def make_base_directory():
"""
    Creates the whole JAMedia directory structure.
"""
import os
import commands
if not os.path.exists(os.path.join(
os.environ["HOME"], "JAMediaDatos")):
os.mkdir(os.path.join(os.environ["HOME"], "JAMediaDatos"))
os.chmod(os.path.join(os.environ["HOME"], "JAMediaDatos"), 0755)
    # unify the JAMedia, JAMediaVideo and JAMediaImagenes directories
directorio_viejo = os.path.join(os.environ["HOME"], ".JAMediaDatos")
directorio_nuevo = os.path.join(os.environ["HOME"], "JAMediaDatos")
if os.path.exists(directorio_viejo):
for elemento in os.listdir(directorio_viejo):
commands.getoutput('mv %s %s' % (os.path.join(directorio_viejo,
elemento), directorio_nuevo))
commands.getoutput('rm -r %s' % (directorio_viejo))
    # JAMedia directories
DIRECTORIO_MIS_ARCHIVOS = os.path.join(
os.environ["HOME"], "JAMediaDatos", "MisArchivos")
DIRECTORIO_DATOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Datos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
os.mkdir(DIRECTORIO_MIS_ARCHIVOS)
os.chmod(DIRECTORIO_MIS_ARCHIVOS, 0755)
if not os.path.exists(DIRECTORIO_DATOS):
os.mkdir(DIRECTORIO_DATOS)
os.chmod(DIRECTORIO_DATOS, 0755)
    # JAMediaTube directory
DIRECTORIO_YOUTUBE = os.path.join(os.environ["HOME"],
"JAMediaDatos", "YoutubeVideos")
if not os.path.exists(DIRECTORIO_YOUTUBE):
os.mkdir(DIRECTORIO_YOUTUBE)
os.chmod(DIRECTORIO_YOUTUBE, 0755)
    # JAMediaVideo directories
AUDIO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Audio")
if not os.path.exists(AUDIO_JAMEDIA_VIDEO):
os.mkdir(AUDIO_JAMEDIA_VIDEO)
os.chmod(AUDIO_JAMEDIA_VIDEO, 0755)
VIDEO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Videos")
if not os.path.exists(VIDEO_JAMEDIA_VIDEO):
os.mkdir(VIDEO_JAMEDIA_VIDEO)
os.chmod(VIDEO_JAMEDIA_VIDEO, 0755)
IMAGENES_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Fotos")
if not os.path.exists(IMAGENES_JAMEDIA_VIDEO):
os.mkdir(IMAGENES_JAMEDIA_VIDEO)
os.chmod(IMAGENES_JAMEDIA_VIDEO, 0755)
def get_data_directory():
"""
    Returns the Data directory of JAMedia and JAMediaTube.
"""
import os
DIRECTORIO_DATOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Datos")
if not os.path.exists(DIRECTORIO_DATOS):
make_base_directory()
return DIRECTORIO_DATOS
def get_tube_directory():
"""
    Returns the Videos directory of JAMediaTube.
"""
import os
DIRECTORIO_YOUTUBE = os.path.join(os.environ["HOME"],
"JAMediaDatos", "YoutubeVideos")
if not os.path.exists(DIRECTORIO_YOUTUBE):
make_base_directory()
return DIRECTORIO_YOUTUBE
def get_audio_directory():
"""
    Returns the Audio directory of JAMedia and JAMediaTube.
"""
import os
AUDIO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Audio")
if not os.path.exists(AUDIO_JAMEDIA_VIDEO):
make_base_directory()
return AUDIO_JAMEDIA_VIDEO
def get_imagenes_directory():
"""
    Returns the Images directory of JAMediaVideo and JAMediaImagenes.
"""
import os
IMAGENES_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Fotos")
if not os.path.exists(IMAGENES_JAMEDIA_VIDEO):
make_base_directory()
return IMAGENES_JAMEDIA_VIDEO
def get_video_directory():
"""
    Returns the Video directory of JAMediaVideo.
"""
import os
VIDEO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Videos")
if not os.path.exists(VIDEO_JAMEDIA_VIDEO):
make_base_directory()
return VIDEO_JAMEDIA_VIDEO
'''
def get_my_files_directory():
"""
    Returns the user's Files directory in JAMedia.
"""
import os
DIRECTORIO_MIS_ARCHIVOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "MisArchivos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
make_base_directory()
return DIRECTORIO_MIS_ARCHIVOS
'''
def get_separador(draw=False, ancho=0, expand=False):
"""
    Returns a generic separator.
"""
import gtk
separador = gtk.SeparatorToolItem()
separador.props.draw = draw
separador.set_size_request(ancho, -1)
separador.set_expand(expand)
return separador
'''
def get_togle_boton(archivo, flip=False,
color=get_color("GRIS"), pixels=24):
# Gdk.Color(65000, 65000, 65000)
"""
    Returns a generic toggletoolbutton.
"""
import gtk
boton = gtk.ToggleToolButton()
imagen = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
archivo, pixels, pixels)
if flip:
pixbuf = pixbuf.flip(True)
imagen.set_from_pixbuf(pixbuf)
boton.set_icon_widget(imagen)
imagen.show()
boton.show()
return boton
'''
def get_boton(archivo, flip=False, rotacion=None,
pixels=24, tooltip_text=None):
"""
    Returns a generic toolbutton.
"""
import gtk
boton = gtk.ToolButton()
imagen = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(archivo, pixels, pixels)
if flip:
pixbuf = pixbuf.flip(True)
if rotacion:
pixbuf = pixbuf.rotate_simple(rotacion)
imagen.set_from_pixbuf(pixbuf)
boton.set_icon_widget(imagen)
imagen.show()
boton.show()
if tooltip_text:
boton.set_tooltip_text(tooltip_text)
boton.TOOLTIP = tooltip_text
return boton
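# Usage sketch (hypothetical wiring; "icono.svg" and on_clicked are
# placeholders, not part of this module):
#   toolbar = gtk.Toolbar()
#   toolbar.insert(get_separador(draw=False, ancho=3), -1)
#   boton = get_boton("icono.svg", pixels=24, tooltip_text="Play")
#   boton.connect("clicked", on_clicked)
#   toolbar.insert(boton, -1)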
| [
"[email protected]"
] | |
0d178fa066c1f4c5d384bfd333819d9ac8351337 | fd5edffed3c69a4d749880e18189c391a0a92562 | /blog/migrations/0002_auto_20181026_1956.py | b69729dbed11c9f0b6dd0221836d82990b3583f9 | [] | no_license | bgarcial/hostayni_platform | 4e9768bc1a13f006167d16b6d33bce88a029c524 | 2cf136b24b27db1a907ccc1274d32c1523abe1a2 | refs/heads/master | 2021-10-14T07:42:30.095351 | 2018-11-14T16:11:54 | 2018-11-14T16:11:54 | 103,794,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-10-26 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='category',
),
migrations.AlterField(
model_name='article',
name='draft',
field=models.BooleanField(default=False, help_text='Si seleccionas esta opción tu artículo no será publicado por el momento', verbose_name='Guardar publicación'),
),
]
| [
"[email protected]"
] | |
4ec65bf797fd519390932f21927af6966f94336b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/168/usersdata/276/70781/submittedfiles/exercicio24.py | 65d4c1fabd7be393ddb9b1c8b13fb77cc8cf2feb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # -*- coding: utf-8 -*-
import math
x = int (input('Digite o valor de x: '))
y = int (input('Digite o valor de y: '))
i = 1
mdc = 0
while (i<=y):
if (x%i==0) and (y%i==0):
mdc = i
print (i)
i = i + 1
| [
"[email protected]"
] | |
39defe150001c2805ae5c7822c51642555a4b3dc | 2bd8fbe6e2ee2511d00479440aa589249234c2d8 | /01-Supervised/11-16/day17/day17-01-integrate-2-RandomForest-2-parallelized.py | 0698a4110a7311f17677f8802c4b7f25c36c8f54 | [] | no_license | LeenonGo/sklearn-learn | 71d21f9b26cfb5cc6d65a22883127db873a31091 | 460d6e75e82943c802f7c025a03c821d02b5d232 | refs/heads/master | 2023-07-13T18:42:17.510938 | 2021-08-18T11:34:06 | 2021-08-18T11:34:06 | 371,628,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # -*- coding: utf-8 -*-
# @Author : Lee
# @Time : 2021/7/20 15:09
# @Function: Parallelization: https://www.scikitlearn.com.cn/0.21.3/12/#11124
# With n_jobs = k, the computation is split into k jobs that run on k cores of the machine.
# If n_jobs = -1 is set, all cores of the machine are used.
#
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
"""
This example shows the use of a forest of trees to evaluate impurity-based pixel importances in an image classification task (faces). The hotter the pixel, the more important it is.
"""
n_jobs = 1  # adjust
data = fetch_olivetti_faces()
X, y = data.data, data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| [
"[email protected]"
] | |
ccd07e782ba302eaba43b3b517b58b8b67f736ae | 62758b6067133b1a4c75da979197d21a5691c34e | /ichnaea/cache.py | 8cc5409f92eecfc5ca30f27177bcc16e2e11344f | [
"Apache-2.0"
] | permissive | mate1983/ichnaea | 903450705f9a83fd74aeb16e5b6fd9644de04065 | ac3ed0640ee8cc7f142ba21cb6976dbf2bd488cb | refs/heads/master | 2020-12-03T10:44:19.124756 | 2016-03-11T11:17:57 | 2016-03-11T11:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | """
Functionality related to using Redis as a cache and a queue.
"""
from contextlib import contextmanager
import redis
from redis.exceptions import RedisError
from six.moves.urllib.parse import urlparse
def configure_redis(cache_url, _client=None):
"""
Configure and return a :class:`~ichnaea.cache.RedisClient` instance.
:param _client: Test-only hook to provide a pre-configured client.
"""
if cache_url is None or _client is not None:
return _client
url = urlparse(cache_url)
netloc = url.netloc.split(':')
host = netloc[0]
if len(netloc) > 1:
port = int(netloc[1])
else: # pragma: no cover
port = 6379
if len(url.path) > 1:
db = int(url.path[1:])
else: # pragma: no cover
db = 0
pool = redis.ConnectionPool(
max_connections=20,
host=host,
port=port,
db=db,
socket_timeout=30.0,
socket_connect_timeout=60.0,
socket_keepalive=True,
)
return RedisClient(connection_pool=pool)
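# Parsing sketch: an illustrative cache_url of 'redis://localhost:6379/0'
# splits into host 'localhost', port 6379 and db 0 via the netloc/path
# handling above.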
@contextmanager
def redis_pipeline(redis_client, execute=True):
"""
Return a Redis pipeline usable as a context manager.
:param execute: Should the pipeline be executed or aborted at the end?
:type execute: bool
"""
with redis_client.pipeline() as pipe:
yield pipe
if execute:
pipe.execute()
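# Usage sketch (assumes a reachable Redis server; the URL is illustrative):
#
#   client = configure_redis('redis://localhost:6379/0')
#   with redis_pipeline(client) as pipe:
#       pipe.set('key', 'value')
#       pipe.expire('key', 60)
#   # both commands are sent in one round trip when the block exits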
class RedisClient(redis.StrictRedis):
"""A strict pingable RedisClient."""
    # The last part of these keys is a counter that can be incremented
    # whenever the contents/structure of the cache changes. This allows
    # for easy 'cache-busting'.
cache_keys = {
'downloads': b'cache:downloads:3',
'fallback_blue': b'cache:fallback:blue:',
'fallback_cell': b'cache:fallback:cell:',
'fallback_wifi': b'cache:fallback:wifi:',
'leaders': b'cache:leaders:2',
'leaders_weekly': b'cache:leaders_weekly:2',
'stats': b'cache:stats:3',
'stats_regions': b'cache:stats_regions:4',
'stats_blue_json': b'cache:stats_blue_json:2',
'stats_cell_json': b'cache:stats_cell_json:2',
'stats_wifi_json': b'cache:stats_wifi_json:2',
}
def ping(self):
"""
Ping the Redis server. On success return `True`, otherwise `False`.
"""
try:
self.execute_command('PING')
except RedisError:
return False
return True
| [
"[email protected]"
] | |
79a6bd1bd4c4a106b21e7d931958f60298534be3 | 0ba9f66cd4db73e49a0beb644a893a2ef7040486 | /objdet/modelloader/ssd.py | e5a159c1de99c234af427f4f2eb54897f32fd2af | [] | no_license | NMADALI97/objdet | 7009d8d2d05190b9aa108575c5eec8441883c524 | f67834b252e0bf1938b794af33a226241fa6899a | refs/heads/master | 2021-09-25T05:21:14.578893 | 2018-10-18T14:37:27 | 2018-10-18T14:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,838 | py | # -*- coding: utf-8 -*-
import torch
from torch import nn
import torch.nn.functional as F
import itertools
import math
from . import utils
class StrideConv(nn.Module):
"""
    StrideConv: downsamples H and W by stride, H*W -> (H/stride)*(W/stride)
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
"""
:param in_channels:
:param out_channels:
:param kernel_size:
:param stride:
:param dilation:
:param groups:
:param bias:
"""
super(StrideConv, self).__init__()
padding = (kernel_size - 1) // 2
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
def forward(self, x):
return self.conv(x)
class StridePool(nn.Module):
"""
    StridePool: downsamples H and W by stride, H*W -> (H/stride)*(W/stride)
"""
def __init__(self, kernel_size, stride=None, ceil_mode=False):
super(StridePool, self).__init__()
padding = (kernel_size - 1) // 2
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode)
def forward(self, x):
return self.pool(x)
class L2Norm(nn.Module):
def __init__(self, in_features, scale):
super(L2Norm, self).__init__()
self.weight = nn.Parameter(torch.Tensor(in_features))
self.reset_parameters(scale)
def reset_parameters(self, scale):
nn.init.constant(self.weight, scale)
def forward(self, x):
x = F.normalize(x, dim=1)
scale = self.weight[None, :, None, None]
return scale * x
class SSDBoxCoder:
def __init__(self, ssd_model):
"""
:type ssd_model: SSD300
"""
self.steps = ssd_model.steps
self.fm_sizes = ssd_model.fm_sizes
self.fm_num = len(self.fm_sizes)
self.aspect_ratios = ssd_model.aspect_ratios
self.box_sizes = ssd_model.box_sizes
self.variances = (0.1, 0.2)
self.default_boxes = self._get_default_boxes()
def _get_default_boxes(self):
"""
:return: boxes: (#boxes, 4), 4 is for (cx, cy, h, w) box format
"""
boxes = []
for fm_id, fm_size in enumerate(self.fm_sizes):
for h, w in itertools.product(range(fm_size), repeat=2):
# print('(h,w):({},{})'.format(h, w))
cx = (w + 0.5) * self.steps[fm_id] # steps recover the center to the origin map
cy = (h + 0.5) * self.steps[fm_id] # steps recover the center to the origin map
# print('(cx,cy):({},{})'.format(cx, cy))
s = self.box_sizes[fm_id]
boxes.append((cx, cy, s, s)) # boxes append (cx, cy, h, w)
s_prime = math.sqrt(self.box_sizes[fm_id] * self.box_sizes[fm_id + 1]) # append large box
boxes.append((cx, cy, s_prime, s_prime)) # boxes append (cx, cy, h, w)
# aspect_ratio just save 2, 3 and append 1/2, 1/3
for aspect_ratio in self.aspect_ratios[fm_id]:
boxes.append((cx, cy, s / math.sqrt(aspect_ratio),
s * math.sqrt(aspect_ratio))) # boxes append (cx, cy, h, w)
boxes.append((cx, cy, s * math.sqrt(aspect_ratio),
s / math.sqrt(aspect_ratio))) # boxes append (cx, cy, h, w)
return torch.Tensor(boxes)
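    # Sanity check on the box count: with fm_sizes (38, 19, 10, 5, 3, 1) and
    # 4/6/6/6/4/4 boxes per location this produces
    # 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732 default
    # boxes, the usual anchor count for SSD300.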
def encode(self, boxes, labels):
"""
        SSD encoding rules:
        tx = (x-anchor_x) / (variance[0]*anchor_w)
        ty = (y-anchor_y) / (variance[0]*anchor_h)
        tw = log(w/anchor_w) / variance[1]
        th = log(h/anchor_h) / variance[1]
        :param boxes: input bounding boxes in (x_lt, y_lt, x_rb, y_rb) format, size [#obj, 4]
        :param labels: labels of the input target classes, size [#obj, ]
:return:
"""
def argmax(x):
x_v, x_i = x.max(0)
x_j = x_v.max(0)[1][0]
return x_i[x_j], x_j
default_boxes = self.default_boxes # xywh
default_boxes_xyxy = utils.change_box_format(default_boxes, 'xywh2xyxy')
        ious = utils.box_iou(default_boxes_xyxy, boxes)  # compute the IOU between boxes and the default boxes
index = torch.LongTensor(len(default_boxes)).fill_(-1)
masked_ious = ious.clone()
        # keep picking the largest IOU until even the largest one is very small
while True:
i, j = argmax(masked_ious)
# print('(i,j):({},{})'.format(i, j))
if masked_ious[i, j] < 1e-6:
break
index[i] = j
masked_ious[i, :] = 0
masked_ious[:, j] = 0
# masked_ious_np = masked_ious.numpy()
# ious_np = ious.numpy()
# index_np = index.numpy()
# print(masked_ious)
mask = (index < 0) & (ious.max(1)[0] >= 0.5)
if mask.any():
index[mask] = ious[mask.nonzero().squeeze()].max(1)[1]
boxes = boxes[index.clamp(min=0)]
boxes_xywh = utils.change_box_format(boxes, 'xyxy2xywh')
        # SSD tx ty tw th encoding
loc_xy = (boxes_xywh[:, :2] - default_boxes[:, :2]) / default_boxes[:, 2:] / self.variances[0]
loc_wh = torch.log(boxes_xywh[:, 2:] / default_boxes[:, 2:]) / self.variances[1]
loc_targets = torch.cat([loc_xy, loc_wh], 1)
cls_targets = 1 + labels[index.clamp(min=0)]
cls_targets[index < 0] = 0
return loc_targets, cls_targets
def decode(self, loc_preds, cls_preds, score_thresh=0.6, nms_thresh=0.45):
xy = loc_preds[:, :2] * self.variances[0] * self.default_boxes[:, 2:] + self.default_boxes[:, :2]
wh = torch.exp(loc_preds[:, 2:] * self.variances[1]) * self.default_boxes[:, 2:]
box_preds = torch.cat([xy - wh / 2, xy + wh / 2], 1)
boxes = []
labels = []
scores = []
num_classes = cls_preds.size(1)
for i in range(num_classes - 1):
score = cls_preds[:, i + 1] # class i corresponds to (i+1) column
mask = score > score_thresh
if not mask.any():
continue
box = box_preds[mask.nonzero().squeeze()]
score = score[mask]
keep = utils.box_nms(box, score, nms_thresh)
boxes.append(box[keep])
labels.append(torch.LongTensor(len(box[keep])).fill_(i))
scores.append(score[keep])
boxes = torch.cat(boxes, 0)
labels = torch.cat(labels, 0)
scores = torch.cat(scores, 0)
return boxes, labels, scores
class VGG16Extractor300(nn.Module):
def __init__(self):
super(VGG16Extractor300, self).__init__()
self.conv1_1 = StrideConv(in_channels=3, out_channels=64, kernel_size=3, stride=1)
self.conv1_2 = StrideConv(in_channels=64, out_channels=64, kernel_size=3, stride=1)
self.pool1 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
self.conv2_1 = StrideConv(in_channels=64, out_channels=128, kernel_size=3, stride=1)
self.conv2_2 = StrideConv(in_channels=128, out_channels=128, kernel_size=3, stride=1)
self.pool2 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
self.conv3_1 = StrideConv(in_channels=128, out_channels=256, kernel_size=3, stride=1)
self.conv3_2 = StrideConv(in_channels=256, out_channels=256, kernel_size=3, stride=1)
self.conv3_3 = StrideConv(in_channels=256, out_channels=256, kernel_size=3, stride=1)
self.pool3 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
self.conv4_1 = StrideConv(in_channels=256, out_channels=512, kernel_size=3, stride=1)
self.conv4_2 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
self.conv4_3 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.norm4 = L2Norm(512, 20)  # regularize with an L2 normalization layer
self.pool4 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
self.conv5_1 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
self.conv5_2 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
self.conv5_3 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
self.pool5 = StridePool(kernel_size=3, stride=1, ceil_mode=True)
        self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6)  # note: dilated (atrous) conv, this layer needs special attention
self.conv7 = StrideConv(in_channels=1024, out_channels=1024, kernel_size=1, stride=1)
self.conv8_1 = StrideConv(in_channels=1024, out_channels=256, kernel_size=1, stride=1)
self.conv8_2 = StrideConv(in_channels=256, out_channels=512, kernel_size=3, stride=2)
self.conv9_1 = StrideConv(in_channels=512, out_channels=128, kernel_size=1, stride=1)
self.conv9_2 = StrideConv(in_channels=128, out_channels=256, kernel_size=3, stride=2)
self.conv10_1 = StrideConv(in_channels=256, out_channels=128, kernel_size=1, stride=1)
self.conv10_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1)
self.conv11_1 = StrideConv(in_channels=256, out_channels=128, kernel_size=1, stride=1)
self.conv11_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1)
def forward(self, x):
xs = []
x = self.conv1_1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = F.relu(x)
x = self.pool1(x)
x = self.conv2_1(x)
x = F.relu(x)
x = self.conv2_2(x)
x = F.relu(x)
x = self.pool2(x)
x = self.conv3_1(x)
x = F.relu(x)
x = self.conv3_2(x)
x = F.relu(x)
x = self.conv3_3(x)
x = F.relu(x)
x = self.pool3(x)
x = self.conv4_1(x)
x = F.relu(x)
x = self.conv4_2(x)
x = F.relu(x)
x = self.conv4_3(x)
x = F.relu(x)
x1 = self.norm4(x)
# print('x1.size():{}'.format(x1.size()))
xs.append(x1) # conv4_3 38*38*512
x = self.pool4(x)
x = self.conv5_1(x)
x = F.relu(x)
x = self.conv5_2(x)
x = F.relu(x)
x = self.conv5_3(x)
x = F.relu(x)
x = self.pool5(x)
x = self.conv6(x)
x = F.relu(x)
x = self.conv7(x)
x = F.relu(x)
x2 = x
# print('x2.size():{}'.format(x2.size()))
xs.append(x2) # conv7 19*19*1024
x = self.conv8_1(x)
x = F.relu(x)
x = self.conv8_2(x)
x = F.relu(x)
x3 = x
# print('x3.size():{}'.format(x3.size()))
xs.append(x3) # conv8_2 10*10*512
x = self.conv9_1(x)
x = F.relu(x)
x = self.conv9_2(x)
x = F.relu(x)
x4 = x
# print('x4.size():{}'.format(x4.size()))
xs.append(x4) # conv9_2 5*5*256
x = self.conv10_1(x)
x = F.relu(x)
x = self.conv10_2(x)
x = F.relu(x)
x5 = x
# print('x5.size():{}'.format(x5.size()))
xs.append(x5) # conv10_2 3*3*256
x = self.conv11_1(x)
x = F.relu(x)
x = self.conv11_2(x)
x = F.relu(x)
x6 = x
# print('x6.size():{}'.format(x6.size()))
xs.append(x6) # conv11_2 1*1*256
# print('x.size():{}'.format(x.size()))
return xs
class SSD300(nn.Module):
steps = (8, 16, 32, 64, 100, 300) # steps for recover to the origin image size
fm_sizes = (38, 19, 10, 5, 3, 1) # feature map size
aspect_ratios = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,)) # aspect ratio
box_sizes = (30, 60, 111, 162, 213, 264, 315) # box size
def __init__(self, num_classes=21):
super(SSD300, self).__init__()
self.num_classes = num_classes
self.num_anchors = (4, 6, 6, 6, 4, 4)
self.in_channels = (512, 1024, 512, 256, 256, 256)
self.extractor = VGG16Extractor300()
self.loc_layers = nn.ModuleList()
self.cls_layers = nn.ModuleList()
for i in range(len(self.in_channels)):
self.loc_layers += [nn.Conv2d(self.in_channels[i], self.num_anchors[i] * 4, kernel_size=3, padding=1)]
self.cls_layers += [
nn.Conv2d(self.in_channels[i], self.num_anchors[i] * self.num_classes, kernel_size=3, padding=1)]
def forward(self, x):
loc_preds = []
cls_preds = []
xs = self.extractor(x)
for i, x in enumerate(xs):
loc_pred = self.loc_layers[i](x)
loc_pred = loc_pred.permute(0, 2, 3, 1).contiguous()
loc_preds.append(loc_pred.view(loc_pred.size(0), -1, 4))
cls_pred = self.cls_layers[i](x)
cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous()
cls_preds.append(cls_pred.view(cls_pred.size(0), -1, self.num_classes))
loc_preds = torch.cat(loc_preds, 1)
cls_preds = torch.cat(cls_preds, 1)
return loc_preds, cls_preds
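    # Shape sketch: for an [N, 3, 300, 300] input the six feature maps
    # contribute 8732 anchors in total, so loc_preds is [N, 8732, 4] and
    # cls_preds is [N, 8732, num_classes].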
class SSDLoss(nn.Module):
def __init__(self, num_classes):
super(SSDLoss, self).__init__()
self.num_classes = num_classes
def _hard_negative_mining(self, cls_loss, pos):
        '''Return negative indices whose count is 3x the number of positive indices.
Args:
cls_loss: (tensor) cross entroy loss between cls_preds and cls_targets, sized [N,#anchors].
pos: (tensor) positive class mask, sized [N,#anchors].
Return:
(tensor) negative indices, sized [N,#anchors].
'''
cls_loss = cls_loss * (pos.float() - 1)
_, idx = cls_loss.sort(1) # sort by negative losses
_, rank = idx.sort(1) # [N,#anchors]
num_neg = 3 * pos.sum(1) # [N,]
neg = rank < num_neg[:, None] # [N,#anchors]
return neg
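    # Worked sketch of the double-argsort rank trick above (made-up losses):
    # masked losses [-0.9, -0.1, -0.5] sort ascending to idx = [0, 2, 1], and
    # sorting idx again yields rank = [0, 2, 1], i.e. each anchor's position
    # in the ascending-loss order, so the hardest negatives get the smallest
    # ranks and are the ones kept by `rank < num_neg`.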
def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
"""Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).
Args:
loc_preds: (tensor) predicted locations, sized [N, #anchors, 4].
loc_targets: (tensor) encoded target locations, sized [N, #anchors, 4].
cls_preds: (tensor) predicted class confidences, sized [N, #anchors, #classes].
cls_targets: (tensor) encoded target labels, sized [N, #anchors].
loss:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + CrossEntropyLoss(cls_preds, cls_targets).
"""
pos = cls_targets > 0 # [N,#anchors]
batch_size = pos.size(0)
num_pos = pos.sum().item()
# ===============================================================
# loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
# ===============================================================
mask = pos.unsqueeze(2).expand_as(loc_preds) # [N,#anchors,4]
loc_loss = F.smooth_l1_loss(loc_preds[mask], loc_targets[mask], size_average=False)
# ===============================================================
# cls_loss = CrossEntropyLoss(cls_preds, cls_targets)
# ===============================================================
cls_loss = F.cross_entropy(cls_preds.view(-1, self.num_classes), cls_targets.view(-1), reduce=False) # [N*#anchors,]
cls_loss = cls_loss.view(batch_size, -1)
cls_loss[cls_targets < 0] = 0 # set ignored loss to 0
neg = self._hard_negative_mining(cls_loss, pos) # [N,#anchors]
cls_loss = cls_loss[pos | neg].sum()
print('loc_loss: {} | cls_loss: {}'.format(loc_loss.item() / num_pos, cls_loss.item() / num_pos))
loss = (loc_loss + cls_loss) / num_pos
return loss
| [
"[email protected]"
] | |
8e76debaea8ecc60552b1c5384895640a9b54d55 | bbc8fbbdd40665af61fedf69962b38c1d5939683 | /deploy/pinax.wsgi | 702a6744a1f0adf054e2dce91b62c0c1158c1580 | [] | no_license | braskin/pd | 64b299ad8058e8d3939bc9778fd1576522f786b0 | df32f96b432c2f07e1a20bcbd84df3eccad5e29a | refs/heads/master | 2021-01-10T22:10:34.318229 | 2013-01-23T11:50:37 | 2013-01-23T11:50:37 | 7,773,119 | 0 | 1 | null | 2020-07-25T19:53:06 | 2013-01-23T11:09:43 | Python | UTF-8 | Python | false | false | 454 | wsgi | # pinax.wsgi is configured to live in projects/playdation/deploy.
import os
import sys
from os.path import abspath, dirname, join
from site import addsitedir
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
from django.conf import settings
os.environ["DJANGO_SETTINGS_MODULE"] = "playdation.settings"
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler() | [
"[email protected]"
] | |
4adb56b19f422e4b95744f384d76d14ff2d0e9c6 | e6ede210d500b8f0772ff09f6a91578297ad6395 | /tests/database/database_perf_load01.py | 5d0fed1c12eb4b69890a20306a01f56a6878d493 | [
"BSD-3-Clause"
] | permissive | pnarvor/nephelae_base | 392d70e001c49d03e7027989d75adaf065f968ee | d5f1abeae0b0473b895b4735f182ddae0516a1bd | refs/heads/master | 2020-06-23T14:23:41.294273 | 2020-02-28T17:01:26 | 2020-02-28T17:01:26 | 198,648,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | #! /usr/bin/python3
import sys
sys.path.append('../../')
import os
import signal
import time
from ivy.std_api import *
import logging
from nephelae_mapping.database import NephelaeDataServer
from helpers.helpers import *
print("loading database... ", end='', flush=True)
t0 = time.time()
# dtbase = NephelaeDataServer.load('output/database_perf01.neph')
# dtbase = NephelaeDataServer.load('output/database_perf02.neph')
dtbase = NephelaeDataServer.load('output/database_perf03.neph')
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['GPS','101'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['101','var_0'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['var_0','101'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
| [
"[email protected]"
] | |
81f7e1a32b448da0c2743974e650e6d2b9659d73 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/v-1.py | 3c7d5e588805d8f24918006b23a615c626a88574 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'v-1':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
b38cc83718ba67b213d350be50f5983e023c5b64 | 838a0c32eb0ab8fa513cfdc698a09ab1eaaef00a | /codes/275. H-Index II.py | e0705b2c4b55f09ebc65aec748c8f9f6ec607acd | [] | no_license | zcgu/leetcode | ff270db9deb000e63dc9f338131c746ce7d24dfb | a041962eeab9192799ad7f74b4bbd3e4f74933d0 | refs/heads/master | 2021-01-11T20:02:49.126449 | 2016-12-31T23:51:38 | 2016-12-31T23:51:38 | 68,346,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
if not citations:
return 0
h = 0
i = len(citations) - 1
while citations[i] > h and i >= 0: # this citations[i] > h not citations[i] >= h
h += 1
i -= 1
return h | [
"[email protected]"
] | |
0d0107c5fc211ba55a7f4194bd58bfb09b71cc71 | 0764489a1cb0793a39252bb0e6afa76854119644 | /scenarios/credit_failed_state/executable.py | 965e78d1121ac9ff409962b2dae867a57ae767dc | [] | no_license | rserna2010/balanced-python | 8ac7bef3cb309be8affaa2aa62e882d631b62bda | 093a401d187bc09293a88214156e9e316185bfa3 | refs/heads/master | 2021-01-21T07:20:47.157987 | 2013-11-08T17:39:54 | 2013-11-08T17:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import balanced
balanced.configure('ak-test-1p1Tsac7gHeMQowL2seB7ieliuAJAufyq')
bank_account_info = {
"routing_number": "121000358",
"type": "checking",
"account_number": "9900000005",
"name": "Johann Bernoulli"
}
credit = balanced.Credit(
amount=10000,
bank_account=bank_account_info
).save() | [
"[email protected]"
] | |
f6588639e6480140e4ca5522ba6434637eccbae4 | 183caf378df099da122f65ea9b75002b1e12b774 | /projFocus/ceRNA/model/projCeRNA_step3-4_WGSC.py | d6335c4a2e5f5ffe52644dc2776b872148729be7 | [] | no_license | cwt1/scripts-1 | f58e476ddb2c83e0480856a95a95a644ad3c001c | 061d6592aa6ab11c93363fcb40305a57db05e3f2 | refs/heads/master | 2021-05-28T01:31:30.896133 | 2014-08-25T19:02:37 | 2014-08-25T19:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | import numpy as np
import sys,getopt
from collections import defaultdict
from parseKeyRegFile import parseKeyRegFile
from collections import Counter
from parseGslistFile import parseGslistFile
class MutSet():
def __init__(self, gene, mutSample, zscore):
self.gene = gene
self.mutSample = mutSample
self.mutZscore = dict(zip(mutSample,zscore))
def __str__(self):
return "(%s, %s)" % (self.gene, self.mutSample)
    def __repr__(self):
return "(%s, %i mutSmp)" % (self.gene, len(self.mutSample))
    def update(self, unionSample):
        # iterate over a copy: removing items from a list while iterating it skips elements
        for smp in list(self.mutSample):
            if smp not in unionSample:
                self.mutSample.remove(smp)   # list.remove() mutates in place and returns None
                del self.mutZscore[smp]      # mutZscore is a dict keyed by sample name
def findWegitedMin(S, R, wtype = 'mean'):
'''
    wtype selects how the mutation z-scores are combined into a weight:
    total: the sum of all mutation z-scores;
    mean: the mean of all mutation z-scores;
    max: the maximum mutation z-score for each gene
'''
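    # Worked example (hypothetical numbers): for intersecting z-scores [1.0, 0.5, 1.5]
    # the candidate's cost is 1/3.0 for 'total', 3/3.0 = 1.0 for 'mean', 1/1.5 for 'max'.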
## get the minimum cost set
minCost = 99999.0
minElement = -1
minSet = ''
minSname = ''
for i, s in enumerate(S):
sname_i = s.gene; ss = s.mutSample; sw = s.mutZscore
ss_i = set(R).intersection(set(ss))
if len(ss_i) == 0:
continue
sw_i = [sw[a] for a in ss_i ]
if wtype == 'total':
cost = 1/reduce(lambda x, y: x+y , sw_i)
elif wtype == 'mean':
cost = len(sw_i)/sum(sw_i)
elif wtype == 'max':
cost = 1/ reduce(lambda x, y: max(x,y), sw_i)
else:
print "ERRor wtype: use default mean; other option total, max or mean"
cost = len(sw_i)/sum(sw_i)
if cost < minCost:
minCost = cost
minElement = i
minSname = sname_i
minSet = ss_i
return minSname, minSet, minCost
def wgsc(S, U, alpha = 0.8, tol = 0.001, wtype = 'mean'):
R = U
C = []
G = []
costs = []
while len(R) != 0:
g_i, S_i, cost = findWegitedMin(S, R, wtype = wtype)
C.append(list(S_i))
G.append(g_i)
R = list(set(R).difference(set(S_i)))
costs.append(cost)
if len(R) < int((1 - alpha) * len(U)):
break
return G, C, costs
def __test__():
from collections import defaultdict
seqSet = {'G1':['S2','S4','S6'],
'G2':['S1','S3'],
'G3':['S1'],
'G4':['S1'],
'G5':['S5'],
'G6':['S3']}
seq = ['S1', 'S2', 'S3', 'S4', 'S5','S6']
weightSet = {'G1':[1.0,0.5,1.5],
'G2':[2.0, 2.5],
'G3':[2.3],
'G4':[1.2],
'G5':[2.5],
'G6':[3.0]}
setObjL = []
for sk, ss in seqSet.items():
setObjL.append(MutSet(sk,ss,weightSet[sk]))
geneL, smpL, costL = wgsc(setObjL, seq, wtype = "mean")
geneL, smpL, costL = wgsc(setObjL, seq, wtype = "total")
geneL, smpL, costL = wgsc(setObjL, seq, wtype = "max")
####-------compute the zscore matrix for each mutation
import numpy as np
# def myZscore(a,b):
# bArr = np.array(b)
# m = np.mean(bArr)
# sd = np.std(bArr)
# return (np.array(a) - m)/sd
def myZscore(a,b):
return abs((np.array(a) - b[0])/b[1])
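# myZscore(a, b) returns |a - mean| / std, where b = [mean, std].
# Illustrative check (hypothetical values): myZscore(7.0, [5.0, 2.0]) -> 1.0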
def formatSampleName(code19):
if len(code19) >11:
return code19[5:16].replace("-",".")
else :
return code19.replace("-", ".")
def loadNormExpfile(filename):
expD = defaultdict(list)
with open(filename) as f:
samples = f.readline().strip().split("\t")
line = f.readline()
while line:
gCrt, valCrt = line.strip().split("\t",1)
valCrt = np.array(map(float, valCrt.split("\t")))
expD[gCrt] = [np.mean(valCrt), np.std(valCrt)]
line = f.readline()
return expD
def loadExpfile(filename, expND):
expD = defaultdict(list)
with open(filename) as f:
expD['samples'] = f.readline().strip().split("\t")
line = f.readline()
while line:
gCrt, valCrt = line.strip().split("\t",1)
try:
expD[gCrt] = map(lambda x:\
myZscore(float(x),expND[gCrt]),\
valCrt.split("\t"))
except:
pass
line = f.readline()
return expD
def loadMutInfo(mutfile, zscoreD):
'''
    load all mutated ceRNA drivers and return, for each driver,
    its mutated samples and their z-scores
'''
mutD = defaultdict(list)
mutZscoreD = defaultdict(list)
cnt = 0
with open(mutfile) as f:
gene, samples = f.readline().strip().split("\t",1)
samples = map(formatSampleName, samples.split("\t"))
mutD['samples'] = samples
line = f.readline()
while line:
cnt = cnt + 1
gene, vals = line.strip().split("\t",1)
mutIdx = [id for (id,m) in enumerate(vals.split("\t")) if m != "0"]
mutSmp = map(samples.__getitem__, mutIdx)
mutSmpInExpSmpID = []; mutSmpInExpSmp = []
for (id, a) in enumerate(zscoreD['samples']) :
if a in mutSmp:
mutSmpInExpSmpID.append(id)
mutSmpInExpSmp.append(a)
mutZscoreD[gene] = map(zscoreD[gene].__getitem__, mutSmpInExpSmpID)
mutD[gene] = mutSmpInExpSmp
line = f.readline()
print " input target genes:\t",cnt
return mutD, mutZscoreD
def prepareDataWGSC(mutfile, gslistfile, keygenefile, pvalCut = 0.01 ):
'''
    given the mutation dict, the z-score dict,
    the intact sample list for each cancer gene,
    and the file from the group lasso result,
    prepare mutation data and z-score data for each cancer gene
    using MutSet objects
'''
tgeneSum, regsSum = parseKeyRegFile(keygenefile, pvalCut)
if not regsSum or not tgeneSum:
return tgeneSum, ''
reglist = regsSum
regMutObjL = []
tgene = tgeneSum[0]
gintsmplist = parseGslistFile(tgene, gslistfile)
##---check whether mutated samples are intact
cnt = 0
regMutAllSmp = []
mutRegs = []
for gene in reglist:
cnt = cnt + 1
crtMut = mutD[gene]; crtMutZscore = mutZscoreD[gene]
if crtMut:
mutRegs.append(gene)
            keptSmp, keptZsc = [], []
            for idx, smp in enumerate(mutD[gene]):
                if smp in gintsmplist:
                    keptSmp.append(smp)
                    keptZsc.append(crtMutZscore[idx])
                    regMutAllSmp.append(smp)
            # rebuild filtered copies: this avoids removing from the list being
            # iterated and keeps the z-score list aligned with its samples
            crtMut, crtMutZscore = keptSmp, keptZsc
if crtMut:
regMutObjL.append(MutSet(gene, crtMut, crtMutZscore))
tempCnter = Counter(regMutAllSmp)
regMutAllSmp, regMutAllSmpLoad = tempCnter.keys(), tempCnter.values()
outInfo = {}
outInfo['gintSmp'] = gintsmplist ## all gint sample
outInfo['mutGintSmp'] = regMutAllSmp ## mutated gint sample
    outInfo['mutGintSmpLoad'] = regMutAllSmpLoad ## each mutated gint sample's mutation frequency
outInfo['tgene'] = tgene ## target gene
outInfo['allRegs'] = reglist ## all regulators
outInfo['mutRegs'] = mutRegs## all mutated regulators
outInfo['intMutRegs'] = map(lambda x:x.gene, regMutObjL)
return {tgene:[regMutObjL, regMutAllSmp]}, outInfo
def __test__():
expTumorM="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix"
expNormalM="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix"
expnD = loadNormExpfile(expNormalM)
zscoreD = loadExpfile(expTumorM, expnD)
mutfile = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/sigMut/step2_mutKeyReg/kegRegs_Apr-18-2014.summary.driverRegs.list.uniq.mut.matrix"
mutD, mutZscoreD = loadMutInfo(mutfile, zscoreD)
gslistfile="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more.hasReg.list"
keygenefile="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/candiReg/run-Apr-1-2014/data/BCL9_candidateRegs_Mar-31-2014.txt"
pvalCut=0.01
tRegMutObjDict, info = prepareDataWGSC(mutfile, gslistfile, keygenefile)
targ = tRegMutObjDict.keys()[0]
print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "mean")
print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "total")
print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "max")
| [
"[email protected]"
] | |
1b10ff1579271a1232c1aa6dffaea63bf8a0342d | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine-Rand/satellite/MethodRefine/satellite_benchmark-high/validating/validating_20.py | 6b5fe7da51099a834f291860f10739404d5f325e | [] | no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 3,502 | py | #!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from satellite import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
state0.sate_num = 3
state0.inst_num = 13
state0.mode_num = 3
state0.direc_num = 5
state0.img_num = 4
state0.on_board = {'inst-3-1':'sate-3','inst-1-1':'sate-1','inst-1-3':'sate-1','inst-1-2':'sate-1','sate-1':['inst-1-1', 'inst-1-2', 'inst-1-3'],'sate-2':['inst-2-1', 'inst-2-2', 'inst-2-3', 'inst-2-4', 'inst-2-5'],'sate-3':['inst-3-1', 'inst-3-2', 'inst-3-3', 'inst-3-4', 'inst-3-5'],'inst-3-4':'sate-3','inst-3-3':'sate-3','inst-3-2':'sate-3','inst-2-4':'sate-2','inst-2-5':'sate-2','inst-2-2':'sate-2','inst-2-3':'sate-2','inst-3-5':'sate-3','inst-2-1':'sate-2',}
state0.mode = {'inst-3-1':'mode-1','inst-1-1':'mode-1','inst-1-3':'mode-3','inst-1-2':'mode-2','mode-1':['inst-2-2', 'inst-1-1', 'inst-2-1', 'inst-3-1', 'inst-3-4'],'mode-2':['inst-3-5', 'inst-1-2', 'inst-2-3', 'inst-3-2', 'inst-3-3'],'mode-3':['inst-2-4', 'inst-1-3', 'inst-2-5'],'inst-3-4':'mode-1','inst-3-3':'mode-2','inst-3-2':'mode-2','inst-2-4':'mode-3','inst-2-5':'mode-3','inst-2-2':'mode-1','inst-2-3':'mode-2','inst-3-5':'mode-2','inst-2-1':'mode-1',}
state0.calib_target = {'inst-1-1':'direc-5','inst-1-3':'direc-4','inst-1-2':'direc-2','inst-2-3':'direc-5','inst-3-4':'direc-4','inst-3-3':'direc-3','inst-3-2':'direc-1','inst-2-4':'direc-4','inst-2-5':'direc-1','inst-2-2':'direc-5','inst-3-1':'direc-4','inst-3-5':'direc-3','inst-2-1':'direc-4',}
state0.pointing = {'sate-1':'direc-3','sate-2':'direc-2','sate-3':'direc-2',}
state0.power_avail = {'sate-1':True,'sate-2':True,'sate-3':True,}
state0.power_on = {'inst-1-1':False,'inst-1-2':False,'inst-1-3':False,'inst-2-1':False,'inst-2-2':False,'inst-2-3':False,'inst-2-4':False,'inst-2-5':False,'inst-3-1':False,'inst-3-2':False,'inst-3-3':False,'inst-3-4':False,'inst-3-5':False,}
state0.calibrate = {'inst-1-1':False,'inst-1-2':False,'inst-1-3':False,'inst-2-1':False,'inst-2-2':False,'inst-2-3':False,'inst-2-4':False,'inst-2-5':False,'inst-3-1':False,'inst-3-2':False,'inst-3-3':False,'inst-3-4':False,'inst-3-5':False,}
state0.have_img = {'direc-1':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-2':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-3':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-4':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-5':{'mode-1': False,'mode-2': False,'mode-3': False,},}
new_tihtn_planner.declare_types({'satellite':['sate-1','sate-2','sate-3',],'instrument':['inst-1-1','inst-1-2','inst-1-3','inst-2-1','inst-2-2','inst-2-3','inst-2-4','inst-2-5','inst-3-1','inst-3-2','inst-3-3','inst-3-4','inst-3-5',],'mode':['mode-1','mode-2','mode-3',],'direction':['direc-1','direc-2','direc-3','direc-4','direc-5',]})
new_tihtn_planner.declare_funs({switch_off:['satellite'],switch_on:['instrument', 'satellite'],turn_to:['satellite', 'direction'],calibrate:['instrument', 'satellite', 'direction'], take_img:['satellite', 'direction', 'instrument', 'mode']})
new_tihtn_planner.instance()
def execute(completable):
return new_tihtn_planner.pyhop(completable, allow, state0,[('get_img','direc-1', 'mode-3'),('get_img','direc-2', 'mode-1'),('get_img','direc-3', 'mode-2'),('get_img','direc-3', 'mode-3'),], [[0, 1],[1, 2],[2, 3],],9)
def add_methods(fun_obj_list):
for fun in fun_obj_list:
new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
new_tihtn_planner.reverse_methods() | [
"[email protected]"
] | |
65aabc1185420c1de3350fef656d55b4d0889e67 | f3050b7f84e584dcde54ca1690944bfccc6f5d9c | /doReport.py | 1f3e2459f51cf6acf2c67e58f657274c4e11e715 | [] | no_license | azhenglianxi/api_Project | 0c8444c2bad7464fd57911be4fdcd131a63c46b2 | 2ae87b87e41f522d4ef20f63bad6adcaec1f9874 | refs/heads/master | 2020-09-14T12:08:07.080748 | 2019-12-12T09:08:22 | 2019-12-12T09:08:22 | 223,124,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import unittest
import ddt
from testCase.course.courseTest1 import CourseTest1
from testCase.course.courseTest2 import CourseTest2
import HtmlTestRunner
from HTMLTestRunner import HTMLTestRunner
# 测试套件 test_suite
# 1.-1: 用例 一个个的添加到suite
# suite=unittest.TestSuite()
# suite.addTest(CourseTest1("test_101"))
# suite.addTest(CourseTest1("test_103"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest2("test_202"))
# 1-2: 用例放入列表中 在添加suite
# suite=unittest.TestSuite()
# list=[CourseTest1("test_101"),CourseTest1("test_103"),CourseTest1("test_102"),CourseTest2("test_202")]
# suite.addTests(list)
# 1-3 :用Testloader类的discover方法来
suite=unittest.defaultTestLoader.discover('testCase',pattern="*Test*.py")
# 2 运行用例,查看结果
# 2-1 第1种情况:不使用HtmlTestRunner插件
# runner=unittest.TextTestRunner()
# runner.run(suite)
# 2-2 第2种情况:使用【经典版】HtmlTestRunner插件
# 新建一个可写二进制文件
# reportFile=open('./report/经典Html报告4.html','wb')
# runner=HTMLTestRunner(stream=reportFile,verbosity=2,description="用例执行详细信息",
# title="测试报告")
# runner.run(suite)
# 2-3 第3种情况:使用【最新版】HtmlTestRunner插件
runner=HtmlTestRunner.HTMLTestRunner(output='./report/',report_name='【最新版】html2测试报告',
report_title='my_report')
runner.run(suite)
| [
"[email protected]"
] | |
1630d428b45f4ba249a3ce615b8614472bebbcec | efd55bc63da8ab6ee964ec82bd0b761fd36107cc | /leetcode/easy/add-strings.py | 9a65e671a52bd084d149cc8082da1b152c7e4665 | [] | no_license | gsantam/competitive-programming | f9a2c9999470eeae9ef4aada6af43b91a65fcb50 | 0b208516a6ae3e72bc7b79ef0ac83dcbfa100496 | refs/heads/master | 2021-06-20T23:27:30.274275 | 2021-06-20T19:44:51 | 2021-06-20T19:44:51 | 162,201,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution:
def addStrings(self, num1: str, num2: str) -> str:
i = 0
l1 = len(num1)
l2 = len(num2)
rest = 0
total_sum = 0
while l1-1-i>=0 or l2-1-i>=0:
sum_ = rest
if l1-1-i>=0:
sum_+=int(num1[l1-1-i])
if l2-1-i>=0:
sum_+=int(num2[l2-1-i])
rest = sum_//10
sum_ = sum_%10
total_sum+=sum_*(10**i)
i+=1
if rest!=0:
total_sum+=rest*(10**i)
return str(total_sum)
| [
"[email protected]"
] | |
7adcf3af90dc069ab9bec98b2839947c8aeeb910 | 0c2130f0aabf2e27fae19ba93a52b444d4abdffd | /webscraping_beautifulSoup/09 Hand on with AMAZON projects/043 amazon-project2-part2-get-book-detail-information-for-one-book.py | e17c7d86efde3a513d5c15b75c8bf65a8b03a310 | [] | no_license | abuzarrizvi/WebScrapingBeautifulSoup4 | 3e583b736f575596b69e0102dbde797d46f47a61 | 9e847e83cef9a914bc1774295fc48f974a1ab796 | refs/heads/master | 2020-06-17T15:01:16.657407 | 2019-08-14T05:08:32 | 2019-08-14T05:08:32 | 195,956,866 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # strategy
# soup --> ISBN table id="productDetailsTable"
# find_all li tag --> get 4th li
# --> Detail --> iframe --> div.text
from bs4 import BeautifulSoup
from selenium import webdriver
#driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
driver = webdriver.Chrome('C:\chromedriver_win32\chromedriver.exe')
url = 'https://www.amazon.com/Python-Programming-Introduction-Computer-Science/dp/1590282418/ref=sr_1_1?ie=UTF8&qid=1473731166&sr=8-1&keywords=python+programming'
driver.get(url)
soup = BeautifulSoup(driver.page_source,'lxml')
table = soup.find('table', {'id':'productDetailsTable'})
all_li = table.find_all('li')
isbn = all_li[3].text.replace('ISBN-10:', '').strip()  # str.strip() with a multi-char argument strips a character set and can eat leading digits
print isbn
driver.switch_to_frame( driver.find_element_by_tag_name('iframe'))
soup = BeautifulSoup(driver.page_source,'lxml')
description = soup.find('div').text
print description
driver.quit()
| [
"[email protected]"
] | |
78d781b8f81b16601a05083f18e8517528c0ccd2 | f9f94ac82a5e78adedd8c87118b13725f7e1cb13 | /service_management/faulty_logging/apps.py | 6db74420b66659fc35b37e95947ad2b4bda92048 | [] | no_license | poppykode/sm | 1b4245c8b03ecb0385ce282e5ab3c89bc3cb57d1 | 44dcf34db94de336a689f1e8456aa6bd802b7734 | refs/heads/master | 2022-11-30T02:25:39.471284 | 2020-01-13T12:04:19 | 2020-01-13T12:04:19 | 220,422,113 | 0 | 0 | null | 2022-11-22T04:18:23 | 2019-11-08T08:32:21 | JavaScript | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class FaultyLoggingConfig(AppConfig):
name = 'faulty_logging'
| [
"[email protected]"
] | |
003d4fcb554b08dd645a2f33bd3035bdd7d5d3f1 | 7234e6c72eb3f09c4a66dbe91f00fdf7742f010f | /algo/arrays/fruitsIntoBasket.py | 52054c5a06bb6d3e66bb587ba1ab54b57a3e8c24 | [] | no_license | srinathalla/python | 718ac603473e7bed060ba66aa3d39a90cf7ef69d | b6c546070b1738350303df3939888d1b0e90e89b | refs/heads/master | 2021-06-13T06:11:42.653311 | 2021-02-19T06:01:41 | 2021-02-19T06:01:41 | 150,374,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | from typing import List
#
# This problem is similar to "longest subarray with at most two distinct characters".
#
# T.C : O(2n) => O(n), since each element enters and leaves the window at most once
# S.C : O(3) => O(1), as the map holds at most 3 entries
#
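# Worked example: tree = [3,3,3,1,2,1,1,2,3,3,4] -> 5; the best window is
# [1,2,1,1,2] (indices 3..7), which holds just two fruit types.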
class Solution:
def totalFruit(self, tree: List[int]) -> int:
if len(tree) < 3:
return len(tree)
left = 0
right = 0
maxVal = [-1, -1]
count = 0
fruitsMap = {}
while right < len(tree):
if tree[right] not in fruitsMap:
fruitsMap[tree[right]] = 0
if fruitsMap[tree[right]] == 0:
count += 1
fruitsMap[tree[right]] += 1
while count > 2:
fruitsMap[tree[left]] -= 1
if fruitsMap[tree[left]] == 0:
count -= 1
left += 1
if maxVal[1] - maxVal[0] < right + 1 - left:
maxVal[1] = right + 1
maxVal[0] = left
right += 1
return maxVal[1] - maxVal[0]
s = Solution()
print(s.totalFruit([3, 3, 3, 1, 2, 1, 1, 2, 3, 3, 4]))
| [
"[email protected]"
] | |
76c42d25f8cacebb06202933fa87bbde25eaea41 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_coarsens.py | 7e3f51e1ea0cdb68080e778c6311c0f110396d1c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.verbs._coarsen import _COARSEN
# class header
class _COARSENS(_COARSEN, ):
def __init__(self,):
_COARSEN.__init__(self)
self.name = "COARSENS"
self.specie = 'verbs'
self.basic = "coarsen"
self.jsondata = {}
| [
"[email protected]"
] | |
ca1ea0bfa5e35ba455e7b13dca16a027b2a67ae0 | 76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6 | /tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_executor.py | 22029308648a87a84dc866cf7e1b633872bbf10c | [
"Apache-2.0"
] | permissive | tensorflow/tfx | 0cfc9c55171352ecc98c9dfa8ffe976c689d7073 | 1b328504fa08a70388691e4072df76f143631325 | refs/heads/master | 2023-08-30T11:56:50.894497 | 2023-08-29T22:47:19 | 2023-08-29T22:48:26 | 169,116,405 | 2,116 | 899 | Apache-2.0 | 2023-09-14T21:51:42 | 2019-02-04T17:14:36 | Python | UTF-8 | Python | false | false | 2,422 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor for AI Platform Training component."""
import datetime
from typing import Any, Dict, List
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.orchestration.launcher import container_common
from tfx.utils import json_utils
_POLLING_INTERVAL_IN_SECONDS = 30
_CONNECTION_ERROR_RETRY_LIMIT = 5
# Keys for AIP training config.
PROJECT_CONFIG_KEY = 'project_id'
TRAINING_JOB_CONFIG_KEY = 'training_job'
JOB_ID_CONFIG_KEY = 'job_id'
LABELS_CONFIG_KEY = 'labels'
CONFIG_KEY = 'aip_training_config'
class AiPlatformTrainingExecutor(base_executor.BaseExecutor):
"""AI Platform Training executor."""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
self._log_startup(input_dict, output_dict, exec_properties)
aip_config = json_utils.loads(exec_properties.pop(CONFIG_KEY))
assert aip_config, 'AIP training config is not found.'
training_job = aip_config.pop(TRAINING_JOB_CONFIG_KEY)
job_id = aip_config.pop(JOB_ID_CONFIG_KEY)
project = aip_config.pop(PROJECT_CONFIG_KEY)
# Resolve parameters.
training_job['training_input'][
'args'] = container_common._resolve_container_command_line( # pylint: disable=protected-access
cmd_args=training_job['training_input']['args'],
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties)
training_job['job_id'] = job_id or 'tfx_{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
# Invoke CMLE job
runner._launch_cloud_training( # pylint: disable=protected-access
project=project,
training_job=training_job)
| [
"[email protected]"
] | |
1674abf712c6b066af59fe0fea6ab7e259a5eb39 | 2d74104aaa132896a65ea0032951eee5d4c97840 | /chemman/msds_collector/migrations/0003_uploadedmsds_token.py | 4bd9419402891cf29b641c4c80e6ef4bb2b7ea19 | [] | no_license | Whitie/ChemManager | 6e228e8713f9dfeca21adbd3e9a65c8871a822bc | d40792361527219514b1b4cc03718ea7c2a92777 | refs/heads/master | 2023-06-09T09:29:41.626087 | 2022-12-14T13:29:44 | 2022-12-14T13:29:44 | 189,994,861 | 0 | 0 | null | 2023-04-21T21:40:13 | 2019-06-03T11:47:23 | Python | UTF-8 | Python | false | false | 553 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-26 06:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msds_collector', '0002_parseddata'),
]
operations = [
migrations.AddField(
model_name='uploadedmsds',
name='token',
field=models.CharField(default='12345', editable=False, max_length=64, verbose_name='Security token'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
e7c04ec2cf024157d985c805cf4d4068468f9938 | 19ee165c252970294333e203728020cdcae550b3 | /agc018/agc018_a/20200210103816.py | ab764993bcef7aa989b4543b9e7a8b7a477f7530 | [] | no_license | autumncolors11/atc_submits | 4528c700e488d530f9cdde3a4198f36b30c3d35e | 6f9689b6d7de45fd4e44ad118e4e3531bb8dac4d | refs/heads/master | 2022-11-28T17:59:55.750896 | 2020-08-09T14:10:05 | 2020-08-09T14:10:05 | 258,122,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import sys
sys.setrecursionlimit(10**6)
from math import floor,ceil,sqrt,factorial,log
from heapq import heappop, heappush, heappushpop
from collections import Counter,defaultdict,deque
from itertools import accumulate,permutations,combinations,product,combinations_with_replacement
from bisect import bisect_left,bisect_right
from copy import deepcopy
from operator import itemgetter
from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the supported import
from functools import reduce
mod = 10 ** 9 + 7
# integer input
def ii(): return int(sys.stdin.readline().rstrip()) #int(input())
def mii(): return map(int,sys.stdin.readline().rstrip().split())
def limii(): return list(mii()) #list(map(int,input().split()))
def lin(n:int): return [ii() for _ in range(n)]
def llint(n: int): return [limii() for _ in range(n)]
# string input
def ss(): return sys.stdin.readline().rstrip() #input()
def mss(): return sys.stdin.readline().rstrip().split()
def limss(): return list(mss()) #list(input().split())
def lst(n:int): return [ss() for _ in range(n)]
def llstr(n: int): return [limss() for _ in range(n)]
# Is this really greedy, or should it be DP??
#agc018 getting difference
n,k=mii()
arr=limii()
#print(arr)
def gcd1(numbers):
return reduce(gcd,numbers)
p=gcd1(arr)
if k%p==0 and k<=max(arr):
print("POSSIBLE")
else:
print("IMPOSSIBLE")
| [
"[email protected]"
] | |
6aaa96fca2f0988e8a953d3ea9d73960f446d645 | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/linear_model/_omp.py | 5ea2e28c9530e946a54473c50100917878145894 | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 3,050 | py | from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import linear_model
import numpy as np
import typing
@scope.define
def sklearn_OrthogonalMatchingPursuit(*args, **kwargs):
return linear_model.OrthogonalMatchingPursuit(*args, **kwargs)
@scope.define
def sklearn_OrthogonalMatchingPursuitCV(*args, **kwargs):
return linear_model.OrthogonalMatchingPursuitCV(*args, **kwargs)
def orthogonal_matching_pursuit(name: str,
n_nonzero_coefs: int = None,
tol: typing.Union[float, Apply] = None,
fit_intercept: bool = True,
precompute: typing.Union[str, bool] = "auto"
):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.linear_model.OrthogonalMatchingPursuit model.
Args:
name: name | str
n_nonzero_coefs: target number non-zero coefficients | int
tol: maximum norm of residual | float
fit_intercept: whether to calculate intercept for model | bool
precompute: whether to use precomputed Gram and Xy matrix | str, bool
"""
def _name(msg):
return f"{name}.orthogonal_matching_pursuit_{msg}"
hp_space = dict(
n_nonzero_coefs=n_nonzero_coefs,
tol=hp.loguniform(_name("tol"), np.log(1e-5), np.log(1e-2)) if tol is None else tol,
fit_intercept=fit_intercept,
precompute=precompute
)
return scope.sklearn_OrthogonalMatchingPursuit(**hp_space)
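# Usage sketch (illustrative): build the pyll search space for hyperopt, e.g.
#   space = orthogonal_matching_pursuit("omp")  # tol sampled log-uniformly in [1e-5, 1e-2]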
def orthogonal_matching_pursuit_cv(name: str,
copy: bool = True,
fit_intercept: bool = True,
max_iter: typing.Union[int, Apply] = None,
cv: typing.Union[int, callable, typing.Generator, Apply] = None,
n_jobs: int = 1,
verbose: typing.Union[bool, int] = False
):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.linear_model.OrthogonalMatchingPursuitCV model.
Args:
name: name | str
copy: whether design matrix must be copied | bool
fit_intercept: whether to calculate intercept for model | bool
max_iter: maximum number of iterations | int
        cv: cross-validation splitting strategy | int, callable or generator
n_jobs: number of CPUs during cv | int
verbose: verbosity amount | bool, int
"""
def _name(msg):
return f"{name}.orthogonal_matching_pursuit_cv_{msg}"
hp_space = dict(
copy=copy,
fit_intercept=fit_intercept,
max_iter=max_iter,
cv=hp.pchoice(_name("cv"), [(0.0625, 3), (0.175, 4), (0.525, 5), (0.175, 6), (0.0625, 7)])
if cv is None else cv,
n_jobs=n_jobs,
verbose=verbose
)
return scope.sklearn_OrthogonalMatchingPursuitCV(**hp_space)
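# Usage sketch (illustrative): space = orthogonal_matching_pursuit_cv("omp_cv")
# draws cv from {3, 4, 5, 6, 7} with the probabilities listed above.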
| [
"[email protected]"
] | |
58c55c37a28dfaf4b6268d6b6d9d66081dbce2b3 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/numpy/2017/8/legendre.py | 5128643cd8480ed49db001f54918bc6838354373 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 57,260 | py | """
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow               raise a Legendre series to a positive integer power
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D power series.
legvander3d Vandermonde-like matrix for 3D power series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
    >>> from numpy import polynomial as P
    >>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
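
    Examples
    --------
    A quick check of the recursion above (output shown to 8 decimals):

    >>> from numpy.polynomial import legendre as L
    >>> L.legmulx([1, 2, 3])
    array([ 0.66666667,  2.2       ,  1.33333333,  1.8       ])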
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
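    Illustrative doctest (output shown to 8 decimals):

    >>> from numpy.polynomial import legendre as L
    >>> L.legpow([1, 2], 2)
    array([ 2.33333333,  4.        ,  2.66666667])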
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
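    A small sanity check: P_0(2) = 1, P_1(2) = 2, P_2(2) = 5.5, so

    >>> from numpy.polynomial import legendre as L
    >>> L.legval(2, [1, 2, 3])
    21.5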
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded::1.7.0
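    Examples
    --------
    A small illustration (added to this copy; not part of the original
    docstring). With ``c = [[[1, 2]]]`` the series is ``1 + 2*L_1(z)``:
    >>> from numpy.polynomial import legendre as L
    >>> L.legval3d(0, 0, 0.5, [[[1, 2]]])
    2.0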
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional Legendre series at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded::1.7.0
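    Examples
    --------
    An illustrative shape check (added to this copy; not part of the
    original docstring). The result has shape
    ``x.shape + y.shape + z.shape``:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> L.leggrid3d([0, 1], [0, 1], [0, 1], np.ones((1, 1, 1))).shape
    (2, 2, 2)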
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
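    Examples
    --------
    An illustrative shape check (added to this copy; not part of the
    original docstring). Five sample points and degree 2 give a
    ``(5, 3)`` matrix:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> L.legvander(np.linspace(-1, 1, 5), 2).shape
    (5, 3)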
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
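    Examples
    --------
    An illustrative shape check (added to this copy; not part of the
    original docstring). Here ``order = (1 + 1)*(2 + 1) = 6``:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = y = np.array([0.0, 0.5, 1.0])
    >>> L.legvander2d(x, y, [1, 2]).shape
    (3, 6)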
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
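    Examples
    --------
    An illustrative shape check (added to this copy; not part of the
    original docstring). Here ``order = 2*2*2 = 8``:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = y = z = np.array([0.0, 0.5, 1.0])
    >>> L.legvander3d(x, y, z, [1, 1, 1]).shape
    (3, 8)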
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was
2-D, the coefficients for the data in column k of `y` are in
column `k`. If `deg` is specified as a list, coefficients for
terms not included in the fit are set equal to zero in the
returned `coef`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
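    A short illustration (added to this copy; the original section was
    left empty). Fitting noisy samples of a cubic returns a coefficient
    vector of length ``deg + 1``:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.linspace(-1, 1, 51)
    >>> y = x**3 - x + np.random.randn(len(x))*0.01
    >>> L.legfit(x, y, 3).shape
    (4,)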
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = legvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = legvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
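    Examples
    --------
    An illustrative shape check (added to this copy; not part of the
    original docstring). A degree-2 series yields a 2x2 companion matrix:
    >>> from numpy.polynomial import legendre as L
    >>> L.legcompanion([1, 2, 3]).shape
    (2, 2)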
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
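    Examples
    --------
    An illustrative check (added to this copy; not part of the original
    docstring). A 3-point rule integrates polynomials of degree <= 5
    exactly, e.g. the integral of ``x**4`` over [-1, 1] is 2/5:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x, w = L.leggauss(3)
    >>> bool(np.allclose(np.dot(w, x**4), 2/5.))
    True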
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
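    Examples
    --------
    A trivial illustration (added to this copy; not part of the original
    docstring). The weight is identically 1 on the interval:
    >>> from numpy.polynomial import legendre as L
    >>> float(L.legweight(0.3))
    1.0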
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
| [
"[email protected]"
] | |
0a6a59073b7043bda4ed6a38ceee5501721c11b1 | db6533cae5a58becf3163d750cd890c73035d0c5 | /set_mark/link.py | cc0f91e2b4be47254492f099864a57c07bc33132 | [
"BSD-3-Clause"
] | permissive | goranmabi/openNAMU | 7e076f18279614a69a7969e22cf3b9fa31605cb5 | 1c0781cb6034040032122df2514e6d8baecc6120 | refs/heads/master | 2021-05-16T16:15:14.076942 | 2018-02-01T10:04:09 | 2018-02-01T10:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,698 | py | import sqlite3
import re
from urllib import parse
import hashlib
def url_pas(data):
return parse.quote(data).replace('/','%2F')
def sha224(data):
return hashlib.sha224(bytes(data, 'utf-8')).hexdigest()
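# A small illustration (added for clarity; not part of the original file):
# url_pas() percent-encodes page titles for /w/ links, also escaping '/',
# e.g. url_pas('a/b') -> 'a%2Fb', and sha224() returns the hex digest used
# to name uploaded image files below.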
def link(conn, title, data, num, category, backlink):
curs = conn.cursor()
    data = data.replace('\\', '\\\\')  # escape literal backslashes (a backslash was dropped in this copy)
m = re.findall("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", data)
for g in m:
if title != g[0]:
if num == 1:
backlink += [[title, g[0], 'cat']]
curs.execute("select title from data where title = ?", [g[0]])
if curs.fetchall():
red = ""
else:
red = 'class="not_thing"'
if(category != ''):
category += ' / '
style = ''
if g[1]:
if re.search('#blur', g[1]):
style = ' style="filter: blur(3px);" onmouseover="this.style.filter=\'none\';" onmouseout="this.style.filter=\'blur(3px)\';"'
category += '<a ' + red + ' ' + style + ' href="/w/' + url_pas(g[0]) + '">' + re.sub("분류:", "", g[0]) + '</a>'
data = re.sub("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", '', data, 1)
test = re.findall('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', data)
for wiki in test:
if wiki[1]:
out = wiki[1]
else:
out = wiki[0]
data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + out + '</a>', data, 1)
test = re.findall('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', data)
for wiki in test:
curs.execute('select link from inter where title = ?', [wiki[0]])
inter = curs.fetchall()
if not inter:
data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '인터위키 정보 없음', data, 1)
else:
if wiki[2]:
out = wiki[0] + ':' + wiki[2]
else:
out = wiki[0] + ':' + wiki[1]
data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '<a id="inside" href="' + inter[0][0] + wiki[1] + '">' + out + '</a>', data, 1)
data = re.sub("\[\[(?::(?P<in>(?:분류|파일):(?:(?:(?!\]\]).)*)))\]\]", "[[\g<in>]]", data)
a = re.findall('\[\[\.\.\/(\|(?:(?!]]).)+)?]]', data)
for i in a:
b = re.search('(.*)\/', title)
if b:
m = b.groups()
if i:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + ']]', data, 1)
else:
if i:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + ']]', data, 1)
data = re.sub('\[\[(?P<in>\/(?:(?!]]|\|).)+)(?P<out>\|(?:(?:(?!]]).)+))?]]', '[[' + title + '\g<in>\g<out>]]', data)
link = re.compile('\[\[((?:(?!\[\[|\]\]|\|).)*)(?:\|((?:(?!\[\[|\]\]).)*))?\]\]')
while 1:
l_d = link.search(data)
if l_d:
d = l_d.groups()
if re.search('^(?:파일|외부):', d[0]):
width = ''
height = ''
align = ''
span = ['', '']
try:
w_d = re.search('width=([0-9]+(?:[a-z%]+)?)', d[1])
if w_d:
width = 'width="' + w_d.groups()[0] + '" '
h_d = re.search('height=([0-9]+(?:[a-z%]+)?)', d[1])
if h_d:
height = 'height="' + h_d.groups()[0] + '" '
a_d = re.search('align=(center|right)', d[1])
if a_d:
span[0] = '<span style="display: block; text-align: ' + a_d.groups()[0] + ';">'
span[1] = '</span>'
except:
pass
f_d = re.search('^파일:([^.]+)\.(.+)$', d[0])
if f_d:
if not re.search("^파일:([^\n]*)", title):
if num == 1:
backlink += [[title, d[0], 'file']]
file_name = f_d.groups()
curs.execute("select title from data where title = ?", ['파일:' + file_name[0] + '.' + file_name[1]])
if not curs.fetchall():
img = '<a class="not_thing" href="/w/' + url_pas('파일:' + file_name[0] + '.' + file_name[1]) + '">파일:' + file_name[0] + '.' + file_name[1] + '</a>'
else:
img = span[0] + '<img src="/image/' + sha224(file_name[0]) + '.' + file_name[1] + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
else:
img = span[0] + '<img src="' + re.sub('^외부:', '', d[0]) + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
elif re.search('^https?:\/\/', re.sub('<([^>]*)>', '', d[0])):
view = d[0]
try:
if re.search('(.+)', d[1]):
view = d[1]
except:
pass
data = link.sub('<a class="out_link" rel="nofollow" href="' + re.sub('<([^>]*)>', '', d[0]) + '">' + view + '</a>', data, 1)
else:
view = d[0].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
try:
if re.search('(.+)', d[1]):
view = d[1].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
except:
pass
sh = ''
                s_d = re.search('#((?:(?!&#x27;|#).)+)$', d[0])
                if s_d:
                    href = re.sub('#((?:(?!&#x27;|#).)+)$', '', d[0])
sh = '#' + s_d.groups()[0]
else:
href = d[0]
if d[0] == title:
data = link.sub('<b>' + view + '</b>', data, 1)
elif re.search('^#', d[0]):
data = link.sub('<a title="' + sh + '" href="' + sh + '">' + view + '</a>', data, 1)
else:
                    a = re.sub('<([^>]*)>', '', href.replace('&#x27;', "'").replace('&quot;', '"').replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\'))
if num == 1:
backlink += [[title, a, '']]
curs.execute("select title from data where title = ?", [a])
if not curs.fetchall():
no = 'class="not_thing"'
if num == 1:
backlink += [[title, a, 'no']]
else:
no = ''
data = link.sub('<a ' + no + ' title="' + re.sub('<([^>]*)>', '', href) + sh + '" href="/w/' + url_pas(a) + sh + '">' + view.replace('\\', '\\\\') + '</a>', data, 1)
else:
break
    data = data.replace('\\\\', '\\')  # undo the escaping applied at the top of link()
return [data, category, backlink] | [
"[email protected]"
] | |
3efa40b1568ac779495027a89b5b37e1c9ac8094 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3303.py | 9127ca7f11dd01a24730d2a21ab3e5fad553dcc5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | def parse_input(str):
str_first_val = str.split()
real_digits = [ int(c) for c in str_first_val[0] ]
return real_digits
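# (Comment added for clarity; not part of the original file.) solve() below
# is a greedy pass over the digits: it tracks the first digit of the current
# run of maximal digits, and at the first decrease it decrements that digit,
# sets every following digit to 9, and stops.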
def solve(test):
big_number = parse_input(test)
num_of_digits = len(big_number)
index_of_max_incrising_digit = 0
for digit_ind in range(0,num_of_digits):
if( big_number[digit_ind] > big_number[index_of_max_incrising_digit] ):
            index_of_max_incrising_digit = digit_ind
elif ( big_number[digit_ind] < big_number[index_of_max_incrising_digit] ):
big_number[index_of_max_incrising_digit] -= 1
for digit_ind_in_change in range(index_of_max_incrising_digit+1,num_of_digits):
big_number[digit_ind_in_change] = 9
break
num_in_str = ''.join(map(str,big_number))
if( num_in_str[0] == '0'):
num_in_str = num_in_str[1:]
return num_in_str | [
"[email protected]"
] | |
dc85fab7d7d45de099b87639674ff0ef08b306c1 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/object_detection/exporter.py | 0abe25a0a8504f1390b6187e755d9d6d1a7a13a3 | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 19,824 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import logging
import os
import tempfile
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
slim = tf.contrib.slim
# TODO(derekjchow): Replace with freeze_graph.freeze_graph_with_def_protos when
# newer version of Tensorflow becomes more common.
def freeze_graph_with_def_protos(
input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
clear_devices,
initializer_nodes,
variable_names_blacklist=''):
"""Converts all variables in a graph and checkpoint into constants."""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if not saver_lib.checkpoint_exists(input_checkpoint):
raise ValueError(
'Input checkpoint "' + input_checkpoint + '" does not exist!')
if not output_node_names:
raise ValueError(
'You must supply the name of a node to --output_node_names.')
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(input_graph_def, name='')
config = tf.ConfigProto(graph_options=tf.GraphOptions())
with session.Session(config=config) as sess:
if input_saver_def:
saver = saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ':0')
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
saver = saver_lib.Saver(var_list=var_list)
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes)
variable_names_blacklist = (variable_names_blacklist.split(',') if
variable_names_blacklist else None)
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.split(','),
variable_names_blacklist=variable_names_blacklist)
return output_graph_def
def replace_variable_values_with_moving_averages(graph,
current_checkpoint_file,
new_checkpoint_file):
"""Replaces variable values in the checkpoint with their moving averages.
If the current checkpoint has shadow variables maintaining moving averages of
the variables defined in the graph, this function generates a new checkpoint
where the variables contain the values of their moving averages.
Args:
graph: a tf.Graph object.
current_checkpoint_file: a checkpoint containing both original variables and
their moving averages.
new_checkpoint_file: file path to write a new checkpoint.
"""
with graph.as_default():
variable_averages = tf.train.ExponentialMovingAverage(0.0)
ema_variables_to_restore = variable_averages.variables_to_restore()
with tf.Session() as sess:
read_saver = tf.train.Saver(ema_variables_to_restore)
read_saver.restore(sess, current_checkpoint_file)
write_saver = tf.train.Saver()
write_saver.save(sess, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor
def _tf_example_input_placeholder():
"""Returns input that accepts a batch of strings with tf examples.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_tf_example_placeholder = tf.placeholder(
tf.string, shape=[None], name='tf_example')
def decode(tf_example_string_tensor):
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_string_tensor)
image_tensor = tensor_dict[fields.InputDataFields.image]
return image_tensor
return (batch_tf_example_placeholder,
tf.map_fn(decode,
elems=batch_tf_example_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
def _encoded_image_string_tensor_input_placeholder():
"""Returns input that accepts a batch of PNG or JPEG strings.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_image_str_placeholder = tf.placeholder(
dtype=tf.string,
shape=[None],
name='encoded_image_string_tensor')
def decode(encoded_image_string_tensor):
image_tensor = tf.image.decode_image(encoded_image_string_tensor,
channels=3)
image_tensor.set_shape((None, None, 3))
return image_tensor
return (batch_image_str_placeholder,
tf.map_fn(
decode,
elems=batch_image_str_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
input_placeholder_fn_map = {
'image_tensor': _image_tensor_input_placeholder,
'encoded_image_string_tensor':
_encoded_image_string_tensor_input_placeholder,
'tf_example': _tf_example_input_placeholder,
}
def _add_output_tensor_nodes(postprocessed_tensors,
output_collection_name='inference_op'):
"""Adds output nodes for detection boxes and scores.
Adds the following nodes for output tensors -
* num_detections: float32 tensor of shape [batch_size].
* detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
containing detected boxes.
* detection_scores: float32 tensor of shape [batch_size, num_boxes]
containing scores for the detected boxes.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
* detection_keypoints: (Optional) float32 tensor of shape
[batch_size, num_boxes, num_keypoints, 2] containing keypoints for each
detection box.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'num_detections': [batch]
output_collection_name: Name of collection to add output tensors to.
Returns:
A tensor dict containing the added output tensor nodes.
"""
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
outputs = {}
outputs[detection_fields.detection_boxes] = tf.identity(
boxes, name=detection_fields.detection_boxes)
outputs[detection_fields.detection_scores] = tf.identity(
scores, name=detection_fields.detection_scores)
outputs[detection_fields.detection_classes] = tf.identity(
classes, name=detection_fields.detection_classes)
outputs[detection_fields.num_detections] = tf.identity(
num_detections, name=detection_fields.num_detections)
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = tf.identity(
keypoints, name=detection_fields.detection_keypoints)
if masks is not None:
outputs[detection_fields.detection_masks] = tf.identity(
masks, name=detection_fields.detection_masks)
for output_key in outputs:
tf.add_to_collection(output_collection_name, outputs[output_key])
if masks is not None:
tf.add_to_collection(output_collection_name,
outputs[detection_fields.detection_masks])
return outputs
def write_frozen_graph(frozen_graph_path, frozen_graph_def):
"""Writes frozen graph to disk.
Args:
frozen_graph_path: Path to write inference graph.
frozen_graph_def: tf.GraphDef holding frozen graph.
"""
with gfile.GFile(frozen_graph_path, 'wb') as f:
f.write(frozen_graph_def.SerializeToString())
logging.info('%d ops in the final graph.', len(frozen_graph_def.node))
def write_saved_model(saved_model_path,
frozen_graph_def,
inputs,
outputs):
"""Writes SavedModel to disk.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
saved_model_path: Path to write SavedModel.
frozen_graph_def: tf.GraphDef holding frozen graph.
inputs: The input image tensor to use for detection.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
"""
with tf.Graph().as_default():
with session.Session() as sess:
tf.import_graph_def(frozen_graph_def, name='')
builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)
tensor_info_inputs = {
'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
tensor_info_outputs = {}
for k, v in outputs.items():
tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
detection_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=signature_constants.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
detection_signature,
},
)
builder.save()
def write_graph_and_checkpoint(inference_graph_def,
model_path,
input_saver_def,
trained_checkpoint_prefix):
"""Writes the graph and the checkpoint into disk."""
for node in inference_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(inference_graph_def, name='')
with session.Session() as sess:
saver = saver_lib.Saver(saver_def=input_saver_def,
save_relative_paths=True)
saver.restore(sess, trained_checkpoint_prefix)
saver.save(sess, model_path)
def _get_outputs_from_inputs(input_tensors, detection_model,
output_collection_name):
inputs = tf.to_float(input_tensors)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
output_tensors = detection_model.predict(
preprocessed_inputs, true_image_shapes)
postprocessed_tensors = detection_model.postprocess(
output_tensors, true_image_shapes)
return _add_output_tensor_nodes(postprocessed_tensors,
output_collection_name)
def _build_detection_graph(input_type, detection_model, input_shape,
output_collection_name, graph_hook_fn):
"""Build the detection graph."""
if input_type not in input_placeholder_fn_map:
raise ValueError('Unknown input type: {}'.format(input_type))
placeholder_args = {}
if input_shape is not None:
if input_type != 'image_tensor':
raise ValueError('Can only specify input shape for `image_tensor` '
'inputs.')
placeholder_args['input_shape'] = input_shape
placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
**placeholder_args)
outputs = _get_outputs_from_inputs(
input_tensors=input_tensors,
detection_model=detection_model,
output_collection_name=output_collection_name)
# Add global step to the graph.
slim.get_or_create_global_step()
if graph_hook_fn: graph_hook_fn()
return outputs, placeholder_tensor
def _export_inference_graph(input_type,
detection_model,
use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names=None,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None):
"""Export helper."""
tf.gfile.MakeDirs(output_directory)
frozen_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
saved_model_path = os.path.join(output_directory, 'saved_model')
model_path = os.path.join(output_directory, 'model.ckpt')
outputs, placeholder_tensor = _build_detection_graph(
input_type=input_type,
detection_model=detection_model,
input_shape=input_shape,
output_collection_name=output_collection_name,
graph_hook_fn=graph_hook_fn)
saver_kwargs = {}
if use_moving_averages:
# This check is to be compatible with both version of SaverDef.
if os.path.isfile(trained_checkpoint_prefix):
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
else:
temp_checkpoint_prefix = tempfile.mkdtemp()
replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
temp_checkpoint_prefix)
checkpoint_to_use = temp_checkpoint_prefix
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
write_graph_and_checkpoint(
inference_graph_def=tf.get_default_graph().as_graph_def(),
model_path=model_path,
input_saver_def=input_saver_def,
trained_checkpoint_prefix=checkpoint_to_use)
if additional_output_tensor_names is not None:
output_node_names = ','.join(outputs.keys()+additional_output_tensor_names)
else:
output_node_names = ','.join(outputs.keys())
frozen_graph_def = freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
initializer_nodes='')
write_frozen_graph(frozen_graph_path, frozen_graph_def)
write_saved_model(saved_model_path, frozen_graph_def,
placeholder_tensor, outputs)
def export_inference_graph(input_type,
pipeline_config,
trained_checkpoint_prefix,
output_directory,
input_shape=None,
output_collection_name='inference_op',
additional_output_tensor_names=None):
"""Exports inference graph for the model specified in the pipeline config.
Args:
input_type: Type of input for the graph. Can be one of [`image_tensor`,
`tf_example`].
    pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto.
trained_checkpoint_prefix: Path to the trained checkpoint file.
output_directory: Path to write outputs.
input_shape: Sets a fixed shape for an `image_tensor` input. If not
specified, will default to [None, None, None, 3].
output_collection_name: Name of collection to add output tensors to.
If None, does not add output tensors to a collection.
additional_output_tensor_names: list of additional output
tensors to include in the frozen graph.
"""
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
_export_inference_graph(input_type, detection_model,
pipeline_config.eval_config.use_moving_averages,
trained_checkpoint_prefix,
output_directory, additional_output_tensor_names,
input_shape, output_collection_name,
graph_hook_fn=None)
pipeline_config.eval_config.use_moving_averages = False
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(
os.path.join(output_directory, 'pipeline.config'), 'wb') as f:
f.write(config_text)
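# A minimal usage sketch (added for illustration; not part of the original
# module). The config path and checkpoint prefix below are hypothetical
# placeholders, and the proto class name assumes the standard
# object_detection pipeline.proto:
#
#   from google.protobuf import text_format
#   from object_detection.protos import pipeline_pb2
#   from object_detection import exporter
#
#   pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
#   with tf.gfile.GFile('path/to/pipeline.config', 'r') as f:
#       text_format.Merge(f.read(), pipeline_config)
#   exporter.export_inference_graph(
#       input_type='image_tensor',
#       pipeline_config=pipeline_config,
#       trained_checkpoint_prefix='path/to/model.ckpt-200000',
#       output_directory='path/to/exported_model')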
| [
"[email protected]"
] | |
b3ab80fc9ff47764f6c0bf07ebfada6f13074ce2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_355/ch131_2020_04_01_17_55_42_755784.py | 4c58703c96d166274d0324a80fa7cc166fe51e65 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | import random
a = random.randint(1, 10)
b = random.randint(1, 10)
s = a + b
contador = 0
print("You have 10 coins")
chutes = int(input("How many guesses do you want to buy? "))
while contador < chutes:
    pri_numero = int(input("Bet on a number: "))
    seg_numero = int(input("Bet on another number; it must be greater than or equal to the previous one: "))
    if s < pri_numero:
        print("The sum is smaller")
    if s > seg_numero:
        print("The sum is bigger")
    if s == pri_numero or s == seg_numero:
        h = 10 - chutes
        g = h + (h * 3)
        print("You got it")
        print(g)  # `return` is only valid inside a function; print the payout instead
        break
    else:
        print("The sum is in between")
contador+=1
| [
"[email protected]"
] | |
723c56869dcbe51563a60e055a7706f3999667c7 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/cpython/test_shelve.py | 8f8bff4866c04207c68433d618ae4b4cbfeb0b4c | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 42 | py | ../../from_cpython/Lib/test/test_shelve.py | [
"[email protected]"
] | |
2cdf52486711ebe99c6646a833bcf3b370fd8337 | d6c9c730ca514af81307018c669bd2f7e5de51c6 | /Stack_20190722/stack_class.py | fea59faa2fbd2e6bd8c2179dd296a356f5911880 | [] | no_license | itbullet/python_projects | a1a56d070a6a70b0814cdc2a83cbd1ce9bc0dab8 | 06d171f1cab7f45c704944e40ffb0b7a175c1d2d | refs/heads/master | 2020-06-22T15:07:40.768297 | 2019-09-09T13:49:02 | 2019-09-09T13:49:02 | 197,734,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | class Stack:
    def __init__(self):
        self.items = []
    def is_empty(self):
        # True when the stack holds no items
        return self.items == []
    def push(self, item):
        # place an item on top of the stack
        self.items.append(item)
    def pop(self):
        # remove and return the top item (LIFO order)
        return self.items.pop()
    def peek(self):
        # return the top item without removing it (idiomatic negative index)
        return self.items[-1]
    def size(self):
return len(self.items) | [
"[email protected]"
] | |
eb0af1cae0abc9f558c4b2dbf6c5134c2923070c | b5b3642dd43599f375baac26de6fe72aacaa0a38 | /8/a.py | bc1ad96065ee82d8eb2eb62b494fd4cf2d49a7c8 | [] | no_license | the-glu/aoc2019 | 61eea9f4c8fab86786f89d1545a0c4301767431f | 5590294e9d4f040d7c17792ac02efb52b0674387 | refs/heads/master | 2020-09-30T15:10:55.719241 | 2019-12-20T09:50:16 | 2019-12-20T09:50:16 | 227,313,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,864 | py | d = """1122222222222222122221222202122222222022222222222221222222222021002222221202221012222221222222222212222201022222222220202222222122212222222222222212221022222222222222122222222202222222222022222222222221222222222221002222221202221212222221222222222212222210022222222222222222222022212222222222222222220222222222222222122222222202222222222022222222222222222222222222012222221202220102222220222222222212222200122222222220212222222122222222222222222212221122222222222222222220222222122222222222222222222220222222222020002222220212220212222222222222222212222202022222222222222222222022212222222222222222222122222222222222022222222222222222222222222222222222222222222120212222220212220012222222222222222222222221222222222222222222222222212222222222222212222222222222222222022221222222222222222022222122222221222222022121102222222222222202222220222222222202222210122222222220202222222222212222222222222202220222222222222222022222222212222222222222222122222021222222022020122222222222222202222221222222222222222221222222222222202222222222222222222222122202221122222222222222222221222212122222222222222022222122222222222221112222221212221212222221222222222202222221002222222222212222222122212222222222222212220222222222222222122220222222122222222122222022222222222222121220112222222222220022222221222222222222222200122222222220212222222222202222222222222202220222222222222202122220222222022222222022222122222011222222022222122222222222221022222222222222222222222222122222222222222222222122202222222222022212220022222222222222222220222202122222222122222122222201222222120222012222221212221002222221222222222202222221222222222221222222222022222222222222122222220022222222222222222221222222122222222222222222222222222222221021102222222212221012222222222222222202222212122222222220212222222022222222222222222212222022202222222202122222222222222222222122222022222110222222020222212222222222220012222221222222220212222200122222222221222222222122212222222222222222222122222222222212022220222212222222222222222122222002222222120121012222220212222002222220222222221202222222102222222220212222222122222222222222222202220022222222222212122222222202122222222022222222222122222222222022022222222212221002222220222222222202222220002222222221212222222122202222222222122212221122202222222212222222222222222222222122222122222221222222121222122122222212220102222220222222220222222220022222222221202222222022222222222222022222222222222222222212222220222212222222222122222022222121222222221222012122221202221012222222222222222222222212122222222221202222222222202222222222022202220222212222222222122221222202222222222122222122222120222222022220122222221202220002222222222222221212222222212220222222202222222222222222222222022212220022212222222222222221222222122222222022220122222112222222121121112122221222220202222220222222220222222210112221222222202222222122212222202222112212221122222222222212222221222212222222222222221022222112222222120021012022220202221112222222220222221222222220222222222221222222222222222222202222202222222022222222222202222221222212122222222022222222222220222222222022002222210212222202222212221222221
222222210112220222222222222222022202222202222012222221122222222222212022221222222222222222222220122222101222222120120002022221212220112222210222202220202222200022221222222222222222222202222222222020202220022202222222222222221222210222222222022222222222201222222121022012222210222222212222210220222222222222201002022222222202222222022202222212022012212220222212222222202222220222200022222222022222022222102222222020221012022222202221202222212200212220212222210002121222222202222222122222222202222111222220022222222222202022222222212122222222222221022222212222222120020012022220212220112222200211212222222222202012121222220212222222122202222222222212202200222202222222212122221222222222222222222221022222220222222021120002220210222220112222221212202221212222201012220222221222222222122212222222122121222220222202222221212222222222202022222222222220122222102222222122221202222202212222222222211211212222202222200222122222222222222222222202222202022200202222022212122220202222222222210222222222122221122222020222222122120212022220212222022222201222212221202222200201121222220222222222002222222222222101202220222222022220212122220222220022022222222222222222211222222021122012122210202222002202210201202220222222200122120222221202222222122222222222122111222220122222122220202222221222210022122222122221022222222222222100020102122212212222112222212201212222222222202111021222220222222222202212222212222201222212222212122220222022221222221222222222022220122222110222222022221102221211222221122212212222222221212222202021220222220222222222202202222222122000202212022202222222212022222222202022222222222220222222111222222222221202120222212221202222222211202221222222221002220222220212222222012222222202222202202220222222122221202112221222202222122222022222222222022222222122122212022212212220212212200212202220222222220021121222220212222222112202222212222001222222120212022221222002221222200222022222022222222222112222222001221222121222202221122212201221222221202222222221020222221222022222202212222212022022202221022222122220222002221122202222122222122222122222200222222120120202021211202222122212220202202221202222211011222222220222122222102202222212022112222200220212022222222202221202222222022222022220122222102222222102120212120211202221022202222222202221202222210001221222222202022222220202222222122220212221120222122222212022220212221022202222122220222222210222222222021022120211212221012222221200212221222222222121221222220212022222100222222222222122202221120212022221222012221012222122022222022221022222110222222100120102121202222221212212221210212220222222220021120222222222122022002222222222222102222200121212122221202022220122211022122222012222122222121222222201221012122212222222122222220202222221222222211222120222221212122222112212222222222201202222222202222221202212222002211122112222122221122222111222222202220102221212202222112202201200212220222222200212222222220202122122102212222212022221222220122202022222212112222122202022112222012221122222121222222021122222221211222222202222221201202220212222220012121222220212122122001212222212122000222222220202022220202222221002201122212222002220222222212222222000120102122211202222002222222201212222212222222110222222222222022222211222222212122102212202221202122221212002221002202022112222002220221222121222222221221002121211201222122212210211222220202222221022022222220222022222101212222202122002202221222212222222222002221102220122012222222220020222111222222112220012220210221221012202202211202220212222222001121222220202122122100202222202222121222212222202022220202122220102211222022222202220222222200
222222211222212121200211222222212222200202220212222200100122222220222022122211212222202222011222202122212122220202111222022201122002222012221120222012222222021020222120210201220212222202222212220212222210001020222221202122222200212222212222221212201020222022220202022220002200022112222212220020222200222222010221122221221212221022202211201212221202222212012121222222222122022200222222222212000222222222222222220222121222112211122222222222220120222222222222201221202121220210222022202222202202221222222222102121222221222122222120212222002202010212222021202222220222212222122202222102222122222021222112222222120020012020222220222002202202221202220212222212220021222221222122222212212222212012112212210122202222212202001222122121022102222102222121222211222222121221012121220220222112212211221222222222222222000121022220202122122212212222022102112212210220212022200222202221012022222112222212220021222112222222021221002122211220220012202221222212221212222210202120022221222222122022222222012222102212200012212122210222212222012112222112222022221222222112222222221222022121211211220102202212221212220212222202010122122222202122122112202222112122022202222210222022222202201221012020222212222212221222222010222222000022022122200212220002222220212202221022222222022021022221202222122112222222012112100212222111202222210212211221202202222202222102220122222010222222212021212220201221222012200211202212222112222201222121222222202122222012212222102002112202221110202122212222111220002200022102222122222020022012222222012222022222220202220112210211202222222112222211200022222220222022222102222222012102011222200012222222210202021220202020222122222012020020122022222222000021012121221212220122200220221202221202222201101020022221222122022110222222202102002202212221212222222202121220202121122202222212121021022120222222102122212021222210220212200210212212220102222220111022222222212022122120202222222022012212212112222022210202010221202021022012222102122022222121222222012220112121212221222012221202202202221212222212100021122222202222022220222222112122211212221011212222221202002222022010122222222012120020122102222222210021112020212211220002222200201212220202222201222020222220202222122111211222112222222202220202202022211202212221112120222212222012121121222220222222210221002221221221222222221220222202222222222200022022122222222122022212220222102212220222202121212222220222221222012221222122222022022122022211222222111021122022220200220022221212210220222002202210200112022221212122122001211222002102022222221112202222202222000221122120022112222002221222022110222222121202112222212200220202200222210212220112222211200110022220212122122222222222102202211212210202222122210222201220002021022012222212120021222121222220010121212021222220220012222200221210222012220202002121122220202022222100211222122122102212220002212222212222020221222002222002222212222222122202222222000000122121200211222102220201202212222022210210000221022222202121122011220222222022010222222112222122222212221222212121122222222202121122122120222221122120112221222201221202222212200220221102210201110001122221202022222020210222212022000222200002202222202222211221222220022222222122120221122211222222011122102122201222222122220202221122220222211221220102222221202220122201211222112122001222202122222022200202020222102120222102222012222221222220222220110020112021202221222112212212222111021012211212022201022221222122122212221222102002022202210101222102200202010220202121222022222122022222122222222220010222012120220202221022200211222012210112212222120100022222202020022000212222012102211212201102212
102210202112220122021022012222002121022122221222222122210012020210221222112220201221112211102200212122220122222212122122100202222122012001222212002222112221202121221222012222102222022222220022022222222220012222221212210220202222210221210211102201222110001222221212220112110222222212211110202220111202202200202210220202222122122222222120022122000222222210022212122200210220212222200222220002002220201002020022222002120022002200222202022012202222220212222202212101221012221022202222022122121022011222220020200122220222222020122212212221221211202210211122212122120012220002112200222222120021212200101212122202202012220202200122122222102221020222210222222211010212120210211221222221212222221011012221201101101122120101021112202222222022120110202222122221112222212101220202212122202222112021100222212222221020111202121200201021202212222221111212202221200111120022020101222122222202222012001120212001121211202200202010221102000022022222112222000222201222222002201112120211220120122212212220002110122210212011222122220220220122120220222002020122222122010222102212202110221022101122122222202222000122012222220111201012120221210220022201221212121220022211221011102022121101120122101220222012122101122121101212212220212120222002011022002222112022000222202222220002102012020200210100022220200212011012202201222012202022221021020212121201222112010011122120201221102210222201221222200122122222112022002122120222220222022222221212212021222212202220110000122222212120202122022222122022002210222012211110002000002200022202002101222212220122022222112120002022222222220212011012021221220202222201202211122001212100202101011222122010120212102211222002111022122000120202002202202220221112122022112222222222222122021222221112020222222221210211112202201200012201202220212112000022220000220002022212222102102201012201020211102201122002211222102122122222202220121122201222221120201102222201211212112020200221110001002110221120202122121211020222122221222022222202012120020220122220022020200102222122122022022022222122120222220102101012021220200021012220221222020221212221202201001022121200120202200202222012010002112111201201122222022122220210211122202022022021200222120222220211112202022220222221212010221202010102122111221212020022121212222222220220222022210210102011112012002201122011222200102222102222022221020122222222221120200022220201212202212210212212112120212021210211000222120202122102121221222012201200002112022011212221012222202021220022102222102022212222022222222212100012120221202212222110200220220010022202202220000122020210020202002220222102210112202001012020122202202022212010201222202222222022021122202222221021212212121220211001212100201221122012102200211200120022021010020022202202222122010221002211222200112100112220210202010222112222002121211122000220220022101002122220221220002000211201102120112002200010202022120221221222022202222110020222202001220201222000012120212001102022122222122120012122220222220002001012220102212102012110210221111000012211202212012122122222022112201210222210011211112212222120112001102021201210012022002122222022122022121220221000100122222111211100012020222221200220202212220022020022220111022122002200222101112020102101222122002110001020210010102022202222022021002222201220221001122002122111220122020110202201020212102020202210002222020202122012100221222020122112122110100112002110020201221100210222022022222222111122112222220001020102222222211122102111221220221121222220200010022122221011022212010212222102010102202210110221022021102212210220102222222222212120221022212220221201110022022100221211211002211212210001202120222121211
12212220112010212222222211020222010202112201010212110001220102211222220222210202210112221122212201222200212112221120220212020221201100210210120111020112212100112201211020122222020211220200002101012201211122221022201012201202212222102112221022102021100112212201021102002011222120220200200202022110012122202000212202220221122211020011012210022000121200121200122202011212220212212212222222202022022000220022212102221100111101221120002111210000122201220212202100002202202022022201110201021222110120202222000012021211021122212221220202201022201122122001201022212210220121002000020121212020022021221222111012202210102211201121022210002120001220212020012210020222021120121222220221201222102002202222022000201001222122220222210010222022102200120202220002201102212001112012210200022220101122102202111121121210100220221020012212200210212202111212201022012201010020202201020202110002122220211200202212021001110022202001022022220201122212012010111210101002020222011112222111210122200220012220220002210222222001012120222112121012001112021121202212121112121012112112212210002010210222222202111200210211202121012002102000101211111010100120221012202200121110120121110120020121100021222100011001001200112000212100001000010102120220120002000122100001220102"""
max_nb_0 = float("inf")  # fewest zeros seen in any layer so far
max_layer = 0            # part 1 checksum (ones * twos) for that layer
layer = 0
z_layer = 0              # zero count in the current layer
nb_1 = 0
nb_2 = 0
image = {}               # decoded pixels; the first opaque value wins
pos_x = 0
pos_y = 0
# Walk the digit string pixel by pixel; each layer is 25 x 6.
for p, x in enumerate(d):
    # Part 2: 2 is transparent, so the first opaque pixel (0 or 1)
    # seen at a position is the one that shows.
    if (x == "0" or x == "1") and (pos_x, pos_y) not in image:
        image[(pos_x, pos_y)] = "X" if x == "1" else " "
    if x == "0":
        z_layer += 1  # zeros per layer, needed for part 1
    if x == "1":
        nb_1 += 1
    if x == "2":
        nb_2 += 1
    p += 1  # switch to a 1-based index so the modulo below fires on a layer's last pixel
    pos_x += 1
    if pos_x == 25:
        pos_y += 1
        pos_x = 0
        if pos_y == 6:
            pos_y = 0
    if p % (25 * 6) == 0:
        # Part 1: remember the layer with the fewest zeros.
        if z_layer < max_nb_0:
            max_nb_0 = z_layer
            max_layer = nb_1 * nb_2
        layer += 1
        z_layer = 0
        nb_1 = 0
        nb_2 = 0
print(max_nb_0)   # fewest zeros in any layer
print(max_layer)  # part 1 answer: ones * twos on that layer
# Part 2: render the decoded 25 x 6 image.
for y in range(0, 6):
for x in range(0, 25):
print(image[(x, y)], end='')
print("")
| [
"[email protected]"
] | |
1c58e39e99b670be9e8d2f6c4131c0c5d37638b3 | 6a1595e33051ebbd098f78cb0ff7d09cfc0a57dc | /day3/work_day3.py | 64838662dc94e940f40402a3a1643dba55a2d0ff | [] | no_license | MannixZ/Python_1-100 | f3514ef1255ca27b656209716bdb27d3821df46e | 740c3d2800f8d55fd2bcd8f789486253e01c9d53 | refs/heads/master | 2020-07-04T03:18:10.125091 | 2019-11-18T16:45:11 | 2019-11-18T16:45:11 | 202,135,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 15:05
# @Author : Mannix
# @File : work_day3.py
# @Software: PyCharm
def work_1():
    '''Convert between imperial inches and metric centimeters.'''
    value = float(input('Enter a length: '))
    unit = input('Enter the unit: ')
    if unit == 'in' or unit == 'inch':
        print('%f inches = %f centimeters' % (value, value * 2.54))
    elif unit == 'cm' or unit == 'centimeter':
        print('%f centimeters = %f inches' % (value, value / 2.54))
    else:
        print('Please enter a valid unit')
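
# A pure, non-interactive variant of the same conversion (a sketch, not part
# of the original exercise; `to_centimeters` is a hypothetical helper name).
def to_centimeters(inches):
    return inches * 2.54  # one inch is defined as exactly 2.54 cm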
def work_2():
    '''Roll a die to decide what to do.'''
    from random import randint
    face = randint(1, 6)
    if face == 1:
        result = 'sing a song'
    elif face == 2:
        result = 'dance'
    elif face == 3:
        result = 'bark like a dog'
    elif face == 4:
        result = 'do push-ups'
    elif face == 5:
        result = 'recite a tongue twister'
    else:
        result = 'tell a bad joke'
    print(result)
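
# An equivalent, more compact variant (a sketch, not part of the original
# exercise; `work_2_alt` is a hypothetical name).
def work_2_alt():
    from random import choice
    print(choice(['sing a song', 'dance', 'bark like a dog',
                  'do push-ups', 'recite a tongue twister', 'tell a bad joke']))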
if __name__ == '__main__':
work_1()
work_2() | [
"[email protected]"
] | |
37b16598f173de07ea41f4d67978f031034c90e7 | ee838d827f128b6d651675fbc11c6127be58280a | /scipy_341_ex3.py | 2c29670607e0bd65654f760b0260cb4d971ce5e0 | [] | no_license | CodedQuen/Scipy-and-Numpy | 80a4b2d6792ba4702634849d583e0ce86e4a2820 | 1b333d5f7cf2c6310c64523f9de80718c6a84cb4 | refs/heads/master | 2022-11-09T23:12:17.624938 | 2020-06-27T04:38:01 | 2020-06-27T04:38:01 | 275,300,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import numpy as np
from scipy.stats import geom
# Set up the parameters for the geometric distribution,
# where p is the probability of success on each trial.
p = 0.5
dist = geom(p)
# Set up the sample range; geom is discrete, so evaluate it on integer support.
x = np.arange(1, 11)
# Calling geom's PMF and CDF
pmf = dist.pmf(x)
cdf = dist.cdf(x)
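# A quick sanity check (a sketch, not part of the original exercise): for a
# geometric distribution the CDF at an integer k is 1 - (1 - p)**k.
for k in [1, 2, 3]:
    assert np.isclose(dist.cdf(k), 1 - (1 - p) ** k)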
# Here we draw out 500 random values from the geometric distribution.
sample = dist.rvs(500) | [
"[email protected]"
] | |
07633b13bd1cf0f0286c52bae03096144bf0adb2 | 868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30 | /keras/keras35_lstm_sequences.py | 13dc52d0ca3c1cfc6a2f3bdc6e3f021efc2c58f9 | [] | no_license | inJAJA/Study | 35d4e410df7b476a4c298664bb99ce9b09bf6296 | c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4 | refs/heads/master | 2022-12-21T11:41:15.396610 | 2020-09-20T23:51:45 | 2020-09-20T23:51:45 | 263,212,524 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py |
## LSTM_Sequences: stacking two LSTM layers
from numpy import array
from keras.models import Model
from keras.layers import Dense, LSTM, Input
# 1. Data
x = array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],
[5,6,7],[6,7,8],[7,8,9],[8,9,10],
[9,10,11],[11,12,13],
[20,30,40],[30,40,50],[40,50,60],
])
y = array([4,5,6,7,8,9,10,11,12,13,50,60,70]) # (13, ) vector
x_predict = array([50, 60, 70]) # (3, )
print('x.shape : ',x.shape) # (13, 3)
print('y.shape : ',y.shape) # (13, ) != (13, 1)
# reshape the 2-D matrix into the 3-D tensor LSTM expects
# x = x.reshape(13, 3, 1)
x = x.reshape(x.shape[0], x.shape[1], 1) # x.shape[0] = 13 / x.shape[1] = 3 / feed the data one feature per timestep
print(x.shape) # (13, 3, 1)
# 2. Model
input1 = Input(shape = (3, 1))
LSTM1 = LSTM(100, return_sequences= True)(input1)
# LSTM2 = LSTM(10, return_sequences= True)(LSTM1) # once return_sequences is used, the next layer must be an LSTM
LSTM2 = LSTM(100)(LSTM1)
dense1 = Dense(50)(LSTM2)
dense2 = Dense(50)(dense1)
dense3 = Dense(50)(dense2)
output1 = Dense(1)(dense3)
model = Model(inputs = input1, outputs = output1)
model.summary()
'''
LSTM  = ( , , ) : 3-D
Dense = ( , )   : 2-D
# return_sequences : output keeps the same number of dimensions as the input
ex) x.shape = (13, 3, 1)
    LSTM1 = LSTM( 10 )(dense1)
            outputs in ' 2 ' dimensions
    LSTM1 = LSTM( 10, return_sequences = True )(LSTM2)
            outputs in the (incoming) ' 3 ' dimensions

(the summary below is from a run with 10-unit LSTM layers)
Layer (type)                 Output Shape              Param #
=================================================================
input_1 (InputLayer) (None, 3, 1) 0
_________________________________________________________________
lstm_1 (LSTM) (None, 3, 10) 480
_________________________________________________________________
lstm_2 (LSTM) (None, 10) 840
_________________________________________________________________
dense_1 (Dense) (None, 5) 55
_________________________________________________________________
dense_2 (Dense) (None, 1) 6
=================================================================
# the previous layer's output nodes become the next layer's input_dim (features).
# LSTM_sequences_parameter
:num_param = 4 * ( num_units + input_dim + bias) * num_units
= 4 * (LSTM2_output + LSTM1_output + 1 ) * LSTM2_output
= 4 * ( 10 + 10 + 1 ) * 10
= 840
'''
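
# A quick check of the parameter formula above (a sketch, not part of the
# original file); the numbers match the 10-unit summary quoted in the docstring.
def lstm_params(units, input_dim):
    # 4 gates, each with an input kernel, a recurrent kernel, and a bias
    return 4 * (units + input_dim + 1) * units

assert lstm_params(10, 1) == 480    # lstm_1 in the quoted summary
assert lstm_params(10, 10) == 840   # lstm_2 in the quoted summary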
# EarlyStopping
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor = 'loss', patience=100, mode = 'min')
# 3. Train
model.compile(optimizer='adam', loss = 'mse')
model.fit(x, y, epochs = 10000, batch_size = 13, callbacks = [es])
# 4. Predict
x_predict = x_predict.reshape(1, 3, 1) # reshape to the same (samples, timesteps, features) layout as x (13, 3, 1)
# (1, 3, 1) : check, 1 * 3 * 1 = 3 elements
# x_predict = x_predict.reshape(1, x_predict.shape[0], 1)
print(x_predict)
y_predict = model.predict(x_predict)
print(y_predict)
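# Given the pattern in the training pairs (e.g. [40, 50, 60] -> 70),
# y_predict should come out close to 80.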
| [
"[email protected]"
] |